Skip to content

Commit c0365aa

Browse files
committed
[X86] Standardize shuffle match/lowering function names. NFC.
We mainly use lowerShuffle*/matchShuffle* - replace the (few) lowerVectorShuffle*/matchVectorShuffle* cases to be consistent.
1 parent 3b417b7 commit c0365aa

File tree

1 file changed

+39
-38
lines changed

1 file changed

+39
-38
lines changed

llvm/lib/Target/X86/X86ISelLowering.cpp

+39-38
Original file line number | Diff line number | Diff line change
@@ -10814,11 +10814,11 @@ static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
1081410814
return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
1081510815
}
1081610816

10817-
static bool matchVectorShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
10818-
unsigned &UnpackOpcode, bool IsUnary,
10819-
ArrayRef<int> TargetMask,
10820-
const SDLoc &DL, SelectionDAG &DAG,
10821-
const X86Subtarget &Subtarget) {
10817+
static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
10818+
unsigned &UnpackOpcode, bool IsUnary,
10819+
ArrayRef<int> TargetMask, const SDLoc &DL,
10820+
SelectionDAG &DAG,
10821+
const X86Subtarget &Subtarget) {
1082210822
int NumElts = VT.getVectorNumElements();
1082310823

1082410824
bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
@@ -10926,8 +10926,8 @@ static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
1092610926
return SDValue();
1092710927
}
1092810928

10929-
static bool matchVectorShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
10930-
int Delta) {
10929+
static bool matchShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
10930+
int Delta) {
1093110931
int Size = (int)Mask.size();
1093210932
int Split = Size / Delta;
1093310933
int TruncatedVectorStart = SwappedOps ? Size : 0;
@@ -11012,20 +11012,19 @@ static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
1101211012

1101311013
// The first half/quarter of the mask should refer to every second/fourth
1101411014
// element of the vector truncated and bitcasted.
11015-
if (!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 2) &&
11016-
!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 4))
11015+
if (!matchShuffleAsVPMOV(Mask, SwappedOps, 2) &&
11016+
!matchShuffleAsVPMOV(Mask, SwappedOps, 4))
1101711017
return SDValue();
1101811018

1101911019
return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
1102011020
}
1102111021

1102211022
// X86 has dedicated pack instructions that can handle specific truncation
1102311023
// operations: PACKSS and PACKUS.
11024-
static bool matchVectorShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1,
11025-
SDValue &V2, unsigned &PackOpcode,
11026-
ArrayRef<int> TargetMask,
11027-
SelectionDAG &DAG,
11028-
const X86Subtarget &Subtarget) {
11024+
static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
11025+
unsigned &PackOpcode, ArrayRef<int> TargetMask,
11026+
SelectionDAG &DAG,
11027+
const X86Subtarget &Subtarget) {
1102911028
unsigned NumElts = VT.getVectorNumElements();
1103011029
unsigned BitSize = VT.getScalarSizeInBits();
1103111030
MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
@@ -11078,8 +11077,8 @@ static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
1107811077
const X86Subtarget &Subtarget) {
1107911078
MVT PackVT;
1108011079
unsigned PackOpcode;
11081-
if (matchVectorShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
11082-
Subtarget))
11080+
if (matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
11081+
Subtarget))
1108311082
return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
1108411083
DAG.getBitcast(PackVT, V2));
1108511084

@@ -11170,10 +11169,10 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
1117011169
const X86Subtarget &Subtarget,
1117111170
SelectionDAG &DAG);
1117211171

11173-
static bool matchVectorShuffleAsBlend(SDValue V1, SDValue V2,
11174-
MutableArrayRef<int> Mask,
11175-
const APInt &Zeroable, bool &ForceV1Zero,
11176-
bool &ForceV2Zero, uint64_t &BlendMask) {
11172+
static bool matchShuffleAsBlend(SDValue V1, SDValue V2,
11173+
MutableArrayRef<int> Mask,
11174+
const APInt &Zeroable, bool &ForceV1Zero,
11175+
bool &ForceV2Zero, uint64_t &BlendMask) {
1117711176
bool V1IsZeroOrUndef =
1117811177
V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
1117911178
bool V2IsZeroOrUndef =
@@ -11236,8 +11235,8 @@ static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
1123611235
uint64_t BlendMask = 0;
1123711236
bool ForceV1Zero = false, ForceV2Zero = false;
1123811237
SmallVector<int, 64> Mask(Original.begin(), Original.end());
11239-
if (!matchVectorShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
11240-
BlendMask))
11238+
if (!matchShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
11239+
BlendMask))
1124111240
return SDValue();
1124211241

1124311242
// Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
@@ -11807,9 +11806,11 @@ static SDValue lowerShuffleAsRotate(const SDLoc &DL, MVT VT, SDValue V1,
1180711806
}
1180811807

1180911808
/// Try to lower a vector shuffle as a byte shift sequence.
11810-
static SDValue lowerVectorShuffleAsByteShiftMask(
11811-
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11812-
const APInt &Zeroable, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11809+
static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
11810+
SDValue V2, ArrayRef<int> Mask,
11811+
const APInt &Zeroable,
11812+
const X86Subtarget &Subtarget,
11813+
SelectionDAG &DAG) {
1181311814
assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
1181411815
assert(VT.is128BitVector() && "Only 128-bit vectors supported");
1181511816

@@ -14254,8 +14255,8 @@ static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
1425414255
return BitBlend;
1425514256

1425614257
// Try to use byte shift instructions to mask.
14257-
if (SDValue V = lowerVectorShuffleAsByteShiftMask(
14258-
DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
14258+
if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
14259+
Zeroable, Subtarget, DAG))
1425914260
return V;
1426014261

1426114262
// Try to lower by permuting the inputs into an unpack instruction.
@@ -14516,8 +14517,8 @@ static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
1451614517
return V;
1451714518

1451814519
// Try to use byte shift instructions to mask.
14519-
if (SDValue V = lowerVectorShuffleAsByteShiftMask(
14520-
DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14520+
if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
14521+
Zeroable, Subtarget, DAG))
1452114522
return V;
1452214523

1452314524
// Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
@@ -17243,8 +17244,8 @@ static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
1724317244
/// above in helper routines. The canonicalization attempts to widen shuffles
1724417245
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
1724517246
/// s.t. only one of the two inputs needs to be tested, etc.
17246-
static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
17247-
SelectionDAG &DAG) {
17247+
static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
17248+
SelectionDAG &DAG) {
1724817249
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
1724917250
ArrayRef<int> OrigMask = SVOp->getMask();
1725017251
SDValue V1 = Op.getOperand(0);
@@ -28457,7 +28458,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2845728458
case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
2845828459
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
2845928460
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
28460-
case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
28461+
case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
2846128462
case ISD::VSELECT: return LowerVSELECT(Op, DAG);
2846228463
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2846328464
case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
@@ -33075,8 +33076,8 @@ static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
3307533076
if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
3307633077
((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
3307733078
((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
33078-
if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
33079-
Subtarget)) {
33079+
if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
33080+
Subtarget)) {
3308033081
DstVT = MaskVT;
3308133082
return true;
3308233083
}
@@ -33088,8 +33089,8 @@ static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
3308833089
(MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
3308933090
(MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
3309033091
(MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
33091-
if (matchVectorShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL,
33092-
DAG, Subtarget)) {
33092+
if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
33093+
Subtarget)) {
3309333094
SrcVT = DstVT = MaskVT;
3309433095
if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
3309533096
SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
@@ -33127,8 +33128,8 @@ static bool matchBinaryPermuteShuffle(
3312733128
uint64_t BlendMask = 0;
3312833129
bool ForceV1Zero = false, ForceV2Zero = false;
3312933130
SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
33130-
if (matchVectorShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
33131-
ForceV2Zero, BlendMask)) {
33131+
if (matchShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
33132+
ForceV2Zero, BlendMask)) {
3313233133
if (MaskVT == MVT::v16i16) {
3313333134
// We can only use v16i16 PBLENDW if the lanes are repeated.
3313433135
SmallVector<int, 8> RepeatedMask;

0 commit comments

Comments (0)