diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 91f696e8fe88e..a4598efc01996 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12987,6 +12987,85 @@ SDValue DAGCombiner::visitVP_SELECT(SDNode *N) {
   return SDValue();
 }
 
+static SDValue combineVSelectWithAllOnesOrZeros(SDValue Cond, SDValue TVal,
+                                                SDValue FVal,
+                                                const TargetLowering &TLI,
+                                                SelectionDAG &DAG,
+                                                const SDLoc &DL) {
+  if (!TLI.isTypeLegal(TVal.getValueType()))
+    return SDValue();
+
+  EVT VT = TVal.getValueType();
+  EVT CondVT = Cond.getValueType();
+
+  assert(CondVT.isVector() && "Vector select expects a vector selector!");
+
+  // Classify TVal/FVal content.
+  bool IsTAllZero = ISD::isBuildVectorAllZeros(TVal.getNode());
+  bool IsTAllOne = ISD::isBuildVectorAllOnes(TVal.getNode());
+  bool IsFAllZero = ISD::isBuildVectorAllZeros(FVal.getNode());
+  bool IsFAllOne = ISD::isBuildVectorAllOnes(FVal.getNode());
+
+  // No vselect(cond, 0/-1, X) or vselect(cond, X, 0/-1) pattern: nothing to do.
+  if (!(IsTAllZero || IsTAllOne || IsFAllZero || IsFAllOne))
+    return SDValue();
+
+  // select Cond, 0, 0 → 0
+  if (IsTAllZero && IsFAllZero) {
+    return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, DL, VT)
+                                : DAG.getConstant(0, DL, VT);
+  }
+
+  // To use the condition operand as a bitwise mask, it must have elements that
+  // are the same size as the select elements. I.e., the condition operand must
+  // have already been promoted from the IR select condition type.
+  // Don't check if the types themselves are equal because that excludes
+  // vector floating-point selects.
+  if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
+    return SDValue();
+
+  // Try inverting Cond and swapping T/F if it gives an all-ones/all-zeros form.
+  if (!IsTAllOne && !IsFAllZero && Cond.hasOneUse() &&
+      Cond.getOpcode() == ISD::SETCC &&
+      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
+          CondVT) {
+    if (IsTAllZero || IsFAllOne) {
+      SDValue CC = Cond.getOperand(2);
+      ISD::CondCode InverseCC = ISD::getSetCCInverse(
+          cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
+      Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
+                          InverseCC);
+      std::swap(TVal, FVal);
+      std::swap(IsTAllOne, IsFAllOne);
+      std::swap(IsTAllZero, IsFAllZero);
+    }
+  }
+
+  // Cond value must be 'sign splat' to be converted to a logical op.
+  if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
+    return SDValue();
+
+  // select Cond, -1, 0 → bitcast Cond
+  if (IsTAllOne && IsFAllZero)
+    return DAG.getBitcast(VT, Cond);
+
+  // select Cond, -1, x → or Cond, x
+  if (IsTAllOne) {
+    SDValue X = DAG.getBitcast(CondVT, FVal);
+    SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, X);
+    return DAG.getBitcast(VT, Or);
+  }
+
+  // select Cond, x, 0 → and Cond, x
+  if (IsFAllZero) {
+    SDValue X = DAG.getBitcast(CondVT, TVal);
+    SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, X);
+    return DAG.getBitcast(VT, And);
+  }
+
+  return SDValue();
+}
+
 SDValue DAGCombiner::visitVSELECT(SDNode *N) {
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
@@ -13255,6 +13334,9 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
   if (SimplifyDemandedVectorElts(SDValue(N, 0)))
     return SDValue(N, 0);
 
+  if (SDValue V = combineVSelectWithAllOnesOrZeros(N0, N1, N2, TLI, DAG, DL))
+    return V;
+
   return SDValue();
 }
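For illustration, a minimal IR-level sketch of the folds the new generic combine performs once the condition has been legalized to a sign-splatted lane mask (illustrative only, not part of the patch; the function name is hypothetical):

define <4 x i32> @fold_to_or_sketch(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x) {
  ; %c legalizes to a <4 x i32> mask of all-zeros/all-ones per lane.
  %c = icmp sgt <4 x i32> %a, %b
  ; True operand all-ones: vselect(mask, -1, x) becomes (or mask, x).
  ; Likewise vselect(mask, x, 0) becomes (and mask, x), and
  ; vselect(mask, -1, 0) becomes a plain bitcast of the mask.
  %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %x
  ret <4 x i32> %r
}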
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7c26dd6e2dc2f..540f28a26ef43 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -47264,13 +47264,14 @@ static SDValue combineToExtendBoolVectorInReg(
       DAG.getConstant(EltSizeInBits - 1, DL, VT));
 }
 
-/// If a vector select has an operand that is -1 or 0, try to simplify the
+/// If a vector select has a left operand that is 0, try to simplify the
 /// select to a bitwise logic operation.
-/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
-static SDValue
-combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG, const SDLoc &DL,
-                                 TargetLowering::DAGCombinerInfo &DCI,
-                                 const X86Subtarget &Subtarget) {
+/// TODO: Move to DAGCombiner.combineVSelectWithAllOnesOrZeros, possibly using
+/// TargetLowering::hasAndNot()?
+static SDValue combineVSelectWithLastZeros(SDNode *N, SelectionDAG &DAG,
+                                           const SDLoc &DL,
+                                           TargetLowering::DAGCombinerInfo &DCI,
+                                           const X86Subtarget &Subtarget) {
   SDValue Cond = N->getOperand(0);
   SDValue LHS = N->getOperand(1);
   SDValue RHS = N->getOperand(2);
@@ -47283,20 +47284,6 @@ combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG, const SDLoc &DL,
 
   assert(CondVT.isVector() && "Vector select expects a vector selector!");
 
-  // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
-  // TODO: Can we assert that both operands are not zeros (because that should
-  //       get simplified at node creation time)?
-  bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
-  bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
-
-  // If both inputs are 0/undef, create a complete zero vector.
-  // FIXME: As noted above this should be handled by DAGCombiner/getNode.
-  if (TValIsAllZeros && FValIsAllZeros) {
-    if (VT.isFloatingPoint())
-      return DAG.getConstantFP(0.0, DL, VT);
-    return DAG.getConstant(0, DL, VT);
-  }
-
   // To use the condition operand as a bitwise mask, it must have elements that
   // are the same size as the select elements. Ie, the condition operand must
   // have already been promoted from the IR select condition type .
@@ -47305,56 +47292,15 @@ combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG, const SDLoc &DL,
   if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
     return SDValue();
 
-  // Try to invert the condition if true value is not all 1s and false value is
-  // not all 0s. Only do this if the condition has one use.
-  bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
-  if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
-      // Check if the selector will be produced by CMPP*/PCMP*.
-      Cond.getOpcode() == ISD::SETCC &&
-      // Check if SETCC has already been promoted.
-      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
-          CondVT) {
-    bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
-
-    if (TValIsAllZeros || FValIsAllOnes) {
-      SDValue CC = Cond.getOperand(2);
-      ISD::CondCode NewCC = ISD::getSetCCInverse(
-          cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
-      Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
-                          NewCC);
-      std::swap(LHS, RHS);
-      TValIsAllOnes = FValIsAllOnes;
-      FValIsAllZeros = TValIsAllZeros;
-    }
-  }
-
   // Cond value must be 'sign splat' to be converted to a logical op.
   if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
     return SDValue();
 
-  // vselect Cond, 111..., 000... -> Cond
-  if (TValIsAllOnes && FValIsAllZeros)
-    return DAG.getBitcast(VT, Cond);
-
   if (!TLI.isTypeLegal(CondVT))
     return SDValue();
 
-  // vselect Cond, 111..., X -> or Cond, X
-  if (TValIsAllOnes) {
-    SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
-    SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
-    return DAG.getBitcast(VT, Or);
-  }
-
-  // vselect Cond, X, 000... -> and Cond, X
-  if (FValIsAllZeros) {
-    SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
-    SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
-    return DAG.getBitcast(VT, And);
-  }
-
   // vselect Cond, 000..., X -> andn Cond, X
-  if (TValIsAllZeros) {
+  if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
     SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
     SDValue AndN;
     // The canonical form differs for i1 vectors - x86andnp is not used
@@ -48115,7 +48061,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
   if (!TLI.isTypeLegal(VT) || isSoftF16(VT, Subtarget))
     return SDValue();
 
-  if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DL, DCI, Subtarget))
+  if (SDValue V = combineVSelectWithLastZeros(N, DAG, DL, DCI, Subtarget))
     return V;
 
   if (SDValue V = combineVSelectToBLENDV(N, DAG, DL, DCI, Subtarget))
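Only the vselect(Cond, 0, X) form stays in the X86 hook, since it lowers to an and-not (e.g. vpandn); per the TODO above, moving it to the generic combiner would likely need TargetLowering::hasAndNot(). A minimal IR sketch of that remaining case (illustrative only; the function name is hypothetical):

define <4 x i32> @andnot_case_sketch(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x) {
  ; All-zeros on the true side: X86 folds this to and-not of the mask with %x.
  %c = icmp sgt <4 x i32> %a, %b
  %r = select <4 x i1> %c, <4 x i32> zeroinitializer, <4 x i32> %x
  ret <4 x i32> %r
}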
diff --git a/llvm/test/CodeGen/AArch64/arm64-zip.ll b/llvm/test/CodeGen/AArch64/arm64-zip.ll
index b24e54a68fb42..20d0c7f1b7085 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zip.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zip.ll
@@ -413,7 +413,7 @@ define <4 x float> @shuffle_zip1(<4 x float> %arg) {
 ; CHECK-NEXT:    fmov.4s v1, #1.00000000
 ; CHECK-NEXT:    zip1.4h v0, v0, v0
 ; CHECK-NEXT:    sshll.4s v0, v0, #0
-; CHECK-NEXT:    and.16b v0, v1, v0
+; CHECK-NEXT:    and.16b v0, v0, v1
 ; CHECK-NEXT:    ret
 bb:
   %inst = fcmp olt <4 x float> zeroinitializer, %arg
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index b4f179e992a0d..6bbbcf88167d8 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -114,9 +114,10 @@ define i64 @not_sign_i64_4(i64 %a) {
 define <7 x i8> @sign_7xi8(<7 x i8> %a) {
 ; CHECK-LABEL: sign_7xi8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v1.8b, #1
-; CHECK-NEXT:    cmlt v0.8b, v0.8b, #0
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    movi v1.2d, #0xffffffffffffffff
+; CHECK-NEXT:    movi v2.8b, #1
+; CHECK-NEXT:    cmge v0.8b, v1.8b, v0.8b
+; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
 ; CHECK-NEXT:    ret
   %c = icmp sgt <7 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %res = select <7 x i1> %c, <7 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <7 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -150,7 +151,8 @@ define <16 x i8> @sign_16xi8(<16 x i8> %a) {
 define <3 x i32> @sign_3xi32(<3 x i32> %a) {
 ; CHECK-LABEL: sign_3xi32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-NEXT:    movi v1.2d, #0xffffffffffffffff
+; CHECK-NEXT:    cmge v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    orr v0.4s, #1
 ; CHECK-NEXT:    ret
   %c = icmp sgt <3 x i32> %a, <i32 -1, i32 -1, i32 -1>
@@ -197,11 +199,9 @@ define <4 x i32> @not_sign_4xi32(<4 x i32> %a) {
 ; CHECK-LABEL: not_sign_4xi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI16_0
-; CHECK-NEXT:    movi v2.4s, #1
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI16_0]
-; CHECK-NEXT:    cmgt v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    and v1.16b, v0.16b, v2.16b
-; CHECK-NEXT:    orn v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    cmge v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #1
 ; CHECK-NEXT:    ret
   %c = icmp sgt <4 x i32> %a,
   %res = select <4 x i1> %c, <4 x i32> , <4 x i32>
diff --git a/llvm/test/CodeGen/AArch64/concatbinop.ll b/llvm/test/CodeGen/AArch64/concatbinop.ll
index 828182d18b38c..062a5a8c35b2c 100644
--- a/llvm/test/CodeGen/AArch64/concatbinop.ll
+++ b/llvm/test/CodeGen/AArch64/concatbinop.ll
@@ -179,7 +179,7 @@ define <16 x i8> @signOf_neon(ptr nocapture noundef readonly %a, ptr nocapture n
 ; CHECK-NEXT:    uzp1 v3.16b, v5.16b, v6.16b
 ; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    and v0.16b, v3.16b, v0.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
 entry:
   %0 = load <8 x i16>, ptr %a, align 2
diff --git a/llvm/test/CodeGen/AArch64/sat-add.ll b/llvm/test/CodeGen/AArch64/sat-add.ll
index 2deb19be24821..ecd48d6b7c65b 100644
--- a/llvm/test/CodeGen/AArch64/sat-add.ll
+++ b/llvm/test/CodeGen/AArch64/sat-add.ll
@@ -530,7 +530,7 @@ define <16 x i8> @unsigned_sat_variable_v16i8_using_cmp_notval(<16 x i8> %x, <16
 ; CHECK-NEXT:    mvn v2.16b, v1.16b
 ; CHECK-NEXT:    add v1.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    cmhi v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %a = add <16 x i8> %x, %y
@@ -570,7 +570,7 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_cmp_notval(<8 x i16> %x, <8
 ; CHECK-NEXT:    mvn v2.16b, v1.16b
 ; CHECK-NEXT:    add v1.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    cmhi v0.8h, v0.8h, v2.8h
-; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %a = add <8 x i16> %x, %y
@@ -610,7 +610,7 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_notval(<4 x i32> %x, <4
 ; CHECK-NEXT:    mvn v2.16b, v1.16b
 ; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    cmhi v0.4s, v0.4s, v2.4s
-; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
   %a = add <4 x i32> %x, %y
@@ -651,7 +651,7 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_notval(<2 x i64> %x, <2
 ; CHECK-NEXT:    mvn v2.16b, v1.16b
 ; CHECK-NEXT:    add v1.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    cmhi v0.2d, v0.2d, v2.2d
-; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <2 x i64> %y, <i64 -1, i64 -1>
   %a = add <2 x i64> %x, %y
diff --git a/llvm/test/CodeGen/AArch64/select_cc.ll b/llvm/test/CodeGen/AArch64/select_cc.ll
index 73e4d4c7f0aeb..483f6c26af8c1 100644
--- a/llvm/test/CodeGen/AArch64/select_cc.ll
+++ b/llvm/test/CodeGen/AArch64/select_cc.ll
@@ -88,7 +88,7 @@ define <2 x double> @select_olt_load_cmp(<2 x double> %a, ptr %src) {
 ; CHECK-SD-NEXT:    ldr d1, [x0]
 ; CHECK-SD-NEXT:    fcmgt v1.2s, v1.2s, #0.0
 ; CHECK-SD-NEXT:    sshll v1.2d, v1.2s, #0
-; CHECK-SD-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SD-NEXT:    and v0.16b, v1.16b, v0.16b
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: select_olt_load_cmp:
diff --git a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
index 32fc9c1377704..0d4a636446164 100644
--- a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
@@ -249,9 +249,6 @@ define <16 x i8> @sel_shift_bool_v16i8(<16 x i1> %t) {
 ; CHECK-SD-LABEL: sel_shift_bool_v16i8:
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-SD-NEXT:    movi v1.16b, #128
-; CHECK-SD-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-SD-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: sel_shift_bool_v16i8:
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index b5d64112db727..aa0a163b96ac8 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -31,12 +31,12 @@ define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
 ; CHECK-NEXT:    add x13, x13, #32
 ; CHECK-NEXT:    fcmgt v3.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    fcmgt v4.4s, v2.4s, v0.4s
-; CHECK-NEXT:    fcmlt v5.4s, v1.4s, #0.0
-; CHECK-NEXT:    fcmlt v6.4s, v2.4s, #0.0
-; CHECK-NEXT:    bit v1.16b, v0.16b, v3.16b
-; CHECK-NEXT:    bit v2.16b, v0.16b, v4.16b
-; CHECK-NEXT:    bic v1.16b, v1.16b, v5.16b
-; CHECK-NEXT:    bic v2.16b, v2.16b, v6.16b
+; CHECK-NEXT:    bsl v3.16b, v0.16b, v1.16b
+; CHECK-NEXT:    bsl v4.16b, v0.16b, v2.16b
+; CHECK-NEXT:    fcmlt v1.4s, v1.4s, #0.0
+; CHECK-NEXT:    fcmlt v2.4s, v2.4s, #0.0
+; CHECK-NEXT:    bic v1.16b, v3.16b, v1.16b
+; CHECK-NEXT:    bic v2.16b, v4.16b, v2.16b
 ; CHECK-NEXT:    fcvtzs v1.4s, v1.4s
 ; CHECK-NEXT:    fcvtzs v2.4s, v2.4s
 ; CHECK-NEXT:    xtn v1.4h, v1.4s
diff --git a/llvm/test/CodeGen/AArch64/vselect-constants.ll b/llvm/test/CodeGen/AArch64/vselect-constants.ll
index a7cf5ece5d270..fe125c9626ea3 100644
--- a/llvm/test/CodeGen/AArch64/vselect-constants.ll
+++ b/llvm/test/CodeGen/AArch64/vselect-constants.ll
@@ -146,10 +146,8 @@ define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_1_or_0_vec:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-NEXT:    movi v1.4s, #1
-; CHECK-NEXT:    shl v0.4s, v0.4s, #31
-; CHECK-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
diff --git a/llvm/test/CodeGen/AArch64/vselect-ext.ll b/llvm/test/CodeGen/AArch64/vselect-ext.ll
index 76b7f3d9dfc0e..4f2b9c5a62669 100644
--- a/llvm/test/CodeGen/AArch64/vselect-ext.ll
+++ b/llvm/test/CodeGen/AArch64/vselect-ext.ll
@@ -12,10 +12,10 @@ define <16 x i32> @no_existing_zext(<16 x i8> %a, <16 x i32> %op) {
 ; CHECK-NEXT:    sshll.4s v6, v5, #0
 ; CHECK-NEXT:    sshll.4s v7, v0, #0
 ; CHECK-NEXT:    sshll2.4s v5, v5, #0
-; CHECK-NEXT:    and.16b v4, v4, v16
-; CHECK-NEXT:    and.16b v0, v1, v6
-; CHECK-NEXT:    and.16b v1, v2, v5
-; CHECK-NEXT:    and.16b v2, v3, v7
+; CHECK-NEXT:    and.16b v4, v16, v4
+; CHECK-NEXT:    and.16b v0, v6, v1
+; CHECK-NEXT:    and.16b v1, v5, v2
+; CHECK-NEXT:    and.16b v2, v7, v3
 ; CHECK-NEXT:    mov.16b v3, v4
 ; CHECK-NEXT:    ret
 entry:
@@ -40,10 +40,10 @@ define <16 x i32> @second_compare_operand_not_splat(<16 x i8> %a, <16 x i8> %b)
 ; CHECK-NEXT:    sshll.4s v7, v1, #0
 ; CHECK-NEXT:    sshll2.4s v16, v3, #0
 ; CHECK-NEXT:    sshll2.4s v1, v1, #0
-; CHECK-NEXT:    and.16b v0, v4, v0
-; CHECK-NEXT:    and.16b v3, v6, v1
-; CHECK-NEXT:    and.16b v1, v2, v16
-; CHECK-NEXT:    and.16b v2, v5, v7
+; CHECK-NEXT:    and.16b v0, v0, v4
+; CHECK-NEXT:    and.16b v3, v1, v6
+; CHECK-NEXT:    and.16b v1, v16, v2
+; CHECK-NEXT:    and.16b v2, v7, v5
 ; CHECK-NEXT:    ret
 entry:
   %ext = zext <16 x i8> %a to <16 x i32>
@@ -69,10 +69,10 @@ define <16 x i32> @same_zext_used_in_cmp_signed_pred_and_select(<16 x i8> %a) {
 ; CHECK-NEXT:    sshll.4s v7, v1, #0
 ; CHECK-NEXT:    sshll2.4s v16, v3, #0
 ; CHECK-NEXT:    sshll2.4s v1, v1, #0
-; CHECK-NEXT:    and.16b v0, v4, v0
-; CHECK-NEXT:    and.16b v3, v6, v1
-; CHECK-NEXT:    and.16b v1, v2, v16
-; CHECK-NEXT:    and.16b v2, v5, v7
+; CHECK-NEXT:    and.16b v0, v0, v4
+; CHECK-NEXT:    and.16b v3, v1, v6
+; CHECK-NEXT:    and.16b v1, v16, v2
+; CHECK-NEXT:    and.16b v2, v7, v5
 ; CHECK-NEXT:    ret
 entry:
   %ext = zext <16 x i8> %a to <16 x i32>
@@ -97,10 +97,10 @@ define <8 x i64> @same_zext_used_in_cmp_unsigned_pred_and_select_v8i64(<8 x i8>
 ; CHECK-NEXT:    cmhi.2d v7, v1, v2
 ; CHECK-NEXT:    cmhi.2d v6, v5, v2
 ; CHECK-NEXT:    cmhi.2d v2, v4, v2
-; CHECK-NEXT:    and.16b v0, v3, v0
-; CHECK-NEXT:    and.16b v1, v1, v7
-; CHECK-NEXT:    and.16b v3, v4, v2
-; CHECK-NEXT:    and.16b v2, v5, v6
+; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v7, v1
+; CHECK-NEXT:    and.16b v3, v2, v4
+; CHECK-NEXT:    and.16b v2, v6, v5
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i8> %a to <8 x i64>
   %cmp = icmp ugt <8 x i8> %a,
@@ -123,10 +123,10 @@ define <16 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v16i32(<16 x i
 ; CHECK-NEXT:    cmhi.4s v7, v2, v1
 ; CHECK-NEXT:    cmhi.4s v6, v5, v1
 ; CHECK-NEXT:    cmhi.4s v1, v4, v1
-; CHECK-NEXT:    and.16b v0, v3, v0
-; CHECK-NEXT:    and.16b v3, v4, v1
-; CHECK-NEXT:    and.16b v1, v2, v7
-; CHECK-NEXT:    and.16b v2, v5, v6
+; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v3, v1, v4
+; CHECK-NEXT:    and.16b v1, v7, v2
+; CHECK-NEXT:    and.16b v2, v6, v5
 ; CHECK-NEXT:    ret
   %ext = zext <16 x i8> %a to <16 x i32>
   %cmp = icmp ugt <16 x i8> %a,
@@ -143,8 +143,8 @@ define <8 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v8i32(<8 x i8>
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmhi.4s v3, v0, v1
 ; CHECK-NEXT:    cmhi.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i8> %a to <8 x i32>
   %cmp = icmp ugt <8 x i8> %a,
@@ -160,8 +160,8 @@ define <8 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v8i32_2(<8 x i1
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmhi.4s v3, v0, v1
 ; CHECK-NEXT:    cmhi.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i16> %a to <8 x i32>
   %cmp = icmp ugt <8 x i16> %a,
@@ -179,8 +179,8 @@ define <8 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v8i32_from_v8i1
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmhi.4s v3, v0, v1
 ; CHECK-NEXT:    cmhi.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i15> %a to <8 x i32>
   %cmp = icmp ugt <8 x i15> %a,
@@ -197,8 +197,8 @@ define <7 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v7i32(<7 x i16>
 ; CHECK-NEXT:    ushll2.4s v0, v0, #0
 ; CHECK-NEXT:    sshll.4s v3, v1, #0
 ; CHECK-NEXT:    sshll2.4s v1, v1, #0
-; CHECK-NEXT:    and.16b v2, v2, v3
-; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    and.16b v2, v3, v2
+; CHECK-NEXT:    and.16b v0, v1, v0
 ; CHECK-NEXT:    mov.s w1, v2[1]
 ; CHECK-NEXT:    mov.s w2, v2[2]
 ; CHECK-NEXT:    mov.s w3, v2[3]
@@ -244,7 +244,7 @@ define <4 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v4i32(<4 x i16>
 ; CHECK-NEXT:    movi.4s v1, #10
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmhi.4s v1, v0, v1
-; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    and.16b v0, v1, v0
 ; CHECK-NEXT:    ret
   %ext = zext <4 x i16> %a to <4 x i32>
   %cmp = icmp ugt <4 x i16> %a,
@@ -259,7 +259,7 @@ define <2 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v2i32(<2 x i16>
 ; CHECK-NEXT:    movi.2s v2, #10
 ; CHECK-NEXT:    and.8b v0, v0, v1
 ; CHECK-NEXT:    cmhi.2s v1, v0, v2
-; CHECK-NEXT:    and.8b v0, v0, v1
+; CHECK-NEXT:    and.8b v0, v1, v0
 ; CHECK-NEXT:    ret
   %ext = zext <2 x i16> %a to <2 x i32>
   %cmp = icmp ugt <2 x i16> %a,
@@ -275,8 +275,8 @@ define <8 x i32> @same_zext_used_in_cmp_eq_and_select_v8i32(<8 x i16> %a) {
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmeq.4s v3, v0, v1
 ; CHECK-NEXT:    cmeq.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i16> %a to <8 x i32>
   %cmp = icmp eq <8 x i16> %a,
@@ -293,8 +293,8 @@ define <8 x i32> @same_zext_used_in_cmp_eq_and_select_v8i32_from_v8i13(<8 x i13>
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmeq.4s v3, v0, v1
 ; CHECK-NEXT:    cmeq.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i13> %a to <8 x i32>
   %cmp = icmp eq <8 x i13> %a,
CHECK-NEXT: mov.s w2, v2[2] ; CHECK-NEXT: mov.s w3, v2[3] @@ -244,7 +244,7 @@ define <4 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v4i32(<4 x i16> ; CHECK-NEXT: movi.4s v1, #10 ; CHECK-NEXT: ushll.4s v0, v0, #0 ; CHECK-NEXT: cmhi.4s v1, v0, v1 -; CHECK-NEXT: and.16b v0, v0, v1 +; CHECK-NEXT: and.16b v0, v1, v0 ; CHECK-NEXT: ret %ext = zext <4 x i16> %a to <4 x i32> %cmp = icmp ugt <4 x i16> %a, @@ -259,7 +259,7 @@ define <2 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v2i32(<2 x i16> ; CHECK-NEXT: movi.2s v2, #10 ; CHECK-NEXT: and.8b v0, v0, v1 ; CHECK-NEXT: cmhi.2s v1, v0, v2 -; CHECK-NEXT: and.8b v0, v0, v1 +; CHECK-NEXT: and.8b v0, v1, v0 ; CHECK-NEXT: ret %ext = zext <2 x i16> %a to <2 x i32> %cmp = icmp ugt <2 x i16> %a, @@ -275,8 +275,8 @@ define <8 x i32> @same_zext_used_in_cmp_eq_and_select_v8i32(<8 x i16> %a) { ; CHECK-NEXT: ushll.4s v0, v0, #0 ; CHECK-NEXT: cmeq.4s v3, v0, v1 ; CHECK-NEXT: cmeq.4s v1, v2, v1 -; CHECK-NEXT: and.16b v1, v2, v1 -; CHECK-NEXT: and.16b v0, v0, v3 +; CHECK-NEXT: and.16b v1, v1, v2 +; CHECK-NEXT: and.16b v0, v3, v0 ; CHECK-NEXT: ret %ext = zext <8 x i16> %a to <8 x i32> %cmp = icmp eq <8 x i16> %a, @@ -293,8 +293,8 @@ define <8 x i32> @same_zext_used_in_cmp_eq_and_select_v8i32_from_v8i13(<8 x i13> ; CHECK-NEXT: ushll.4s v0, v0, #0 ; CHECK-NEXT: cmeq.4s v3, v0, v1 ; CHECK-NEXT: cmeq.4s v1, v2, v1 -; CHECK-NEXT: and.16b v1, v2, v1 -; CHECK-NEXT: and.16b v0, v0, v3 +; CHECK-NEXT: and.16b v1, v1, v2 +; CHECK-NEXT: and.16b v0, v3, v0 ; CHECK-NEXT: ret %ext = zext <8 x i13> %a to <8 x i32> %cmp = icmp eq <8 x i13> %a, @@ -358,16 +358,16 @@ define <16 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_other_use(<16 ; CHECK-NEXT: and.16b v6, v6, v26 ; CHECK-NEXT: sshll2.2d v26, v16, #0 ; CHECK-NEXT: and.16b v27, v4, v27 -; CHECK-NEXT: and.16b v4, v0, v18 -; CHECK-NEXT: and.16b v0, v24, v16 +; CHECK-NEXT: and.16b v4, v18, v0 +; CHECK-NEXT: and.16b v0, v16, v24 ; CHECK-NEXT: stp q7, q21, [x0, #96] ; CHECK-NEXT: sshll.2d v21, v16, #0 ; CHECK-NEXT: and.16b v5, v5, v22 ; CHECK-NEXT: and.16b v7, v3, v23 -; CHECK-NEXT: and.16b v3, v19, v20 +; CHECK-NEXT: and.16b v3, v20, v19 ; CHECK-NEXT: stp q5, q6, [x0, #64] ; CHECK-NEXT: and.16b v6, v2, v26 -; CHECK-NEXT: and.16b v2, v25, v17 +; CHECK-NEXT: and.16b v2, v17, v25 ; CHECK-NEXT: and.16b v5, v1, v21 ; CHECK-NEXT: mov.16b v1, v3 ; CHECK-NEXT: mov.16b v3, v4 @@ -397,10 +397,10 @@ define <16 x i32> @same_sext_used_in_cmp_signed_pred_and_select_v16i32(<16 x i8> ; CHECK-NEXT: cmgt.4s v7, v2, v1 ; CHECK-NEXT: cmgt.4s v6, v5, v1 ; CHECK-NEXT: cmgt.4s v1, v4, v1 -; CHECK-NEXT: and.16b v0, v3, v0 -; CHECK-NEXT: and.16b v3, v4, v1 -; CHECK-NEXT: and.16b v1, v2, v7 -; CHECK-NEXT: and.16b v2, v5, v6 +; CHECK-NEXT: and.16b v0, v0, v3 +; CHECK-NEXT: and.16b v3, v1, v4 +; CHECK-NEXT: and.16b v1, v7, v2 +; CHECK-NEXT: and.16b v2, v6, v5 ; CHECK-NEXT: ret entry: %ext = sext <16 x i8> %a to <16 x i32> @@ -417,8 +417,8 @@ define <8 x i32> @same_sext_used_in_cmp_eq_and_select_v8i32(<8 x i16> %a) { ; CHECK-NEXT: sshll.4s v0, v0, #0 ; CHECK-NEXT: cmeq.4s v3, v0, v1 ; CHECK-NEXT: cmeq.4s v1, v2, v1 -; CHECK-NEXT: and.16b v1, v2, v1 -; CHECK-NEXT: and.16b v0, v0, v3 +; CHECK-NEXT: and.16b v1, v1, v2 +; CHECK-NEXT: and.16b v0, v3, v0 ; CHECK-NEXT: ret %ext = sext <8 x i16> %a to <8 x i32> %cmp = icmp eq <8 x i16> %a, @@ -438,8 +438,8 @@ define <8 x i32> @same_sext_used_in_cmp_eq_and_select_v8i32_from_v8i13(<8 x i13> ; CHECK-NEXT: sshr.4s v2, v2, #19 ; CHECK-NEXT: cmeq.4s v3, v2, v1 ; CHECK-NEXT: cmeq.4s v1, v0, v1 -; 
@@ -556,10 +556,10 @@ define <16 x i32> @same_zext_used_in_cmp_signed_pred_and_select_can_convert_to_u
 ; CHECK-NEXT:    sshll.4s v7, v1, #0
 ; CHECK-NEXT:    sshll2.4s v16, v3, #0
 ; CHECK-NEXT:    sshll2.4s v1, v1, #0
-; CHECK-NEXT:    and.16b v0, v4, v0
-; CHECK-NEXT:    and.16b v3, v6, v1
-; CHECK-NEXT:    and.16b v1, v2, v16
-; CHECK-NEXT:    and.16b v2, v5, v7
+; CHECK-NEXT:    and.16b v0, v0, v4
+; CHECK-NEXT:    and.16b v3, v1, v6
+; CHECK-NEXT:    and.16b v1, v16, v2
+; CHECK-NEXT:    and.16b v2, v7, v5
 ; CHECK-NEXT:    ret
 entry:
   %ext = zext <16 x i8> %a to <16 x i32>
@@ -604,10 +604,10 @@ define void @extension_in_loop_v16i8_to_v16i32(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    sshll.4s v6, v6, #0
 ; CHECK-NEXT:    sshll2.4s v19, v5, #0
 ; CHECK-NEXT:    sshll.4s v5, v5, #0
-; CHECK-NEXT:    and.16b v7, v7, v17
-; CHECK-NEXT:    and.16b v6, v16, v6
-; CHECK-NEXT:    and.16b v16, v18, v19
-; CHECK-NEXT:    and.16b v4, v4, v5
+; CHECK-NEXT:    and.16b v7, v17, v7
+; CHECK-NEXT:    and.16b v6, v6, v16
+; CHECK-NEXT:    and.16b v16, v19, v18
+; CHECK-NEXT:    and.16b v4, v5, v4
 ; CHECK-NEXT:    stp q6, q7, [x1, #32]
 ; CHECK-NEXT:    stp q4, q16, [x1], #64
 ; CHECK-NEXT:    b.ne LBB24_1
@@ -674,10 +674,10 @@ define void @extension_in_loop_as_shuffle_v16i8_to_v16i32(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    sshll.4s v6, v6, #0
 ; CHECK-NEXT:    sshll2.4s v19, v5, #0
 ; CHECK-NEXT:    sshll.4s v5, v5, #0
-; CHECK-NEXT:    and.16b v7, v7, v17
-; CHECK-NEXT:    and.16b v6, v16, v6
-; CHECK-NEXT:    and.16b v16, v18, v19
-; CHECK-NEXT:    and.16b v4, v4, v5
+; CHECK-NEXT:    and.16b v7, v17, v7
+; CHECK-NEXT:    and.16b v6, v6, v16
+; CHECK-NEXT:    and.16b v16, v19, v18
+; CHECK-NEXT:    and.16b v4, v5, v4
 ; CHECK-NEXT:    stp q6, q7, [x1, #32]
 ; CHECK-NEXT:    stp q4, q16, [x1], #64
 ; CHECK-NEXT:    b.ne LBB25_1
@@ -745,10 +745,10 @@ define void @shuffle_in_loop_is_no_extend_v16i8_to_v16i32(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    sshll.4s v6, v6, #0
 ; CHECK-NEXT:    sshll2.4s v19, v5, #0
 ; CHECK-NEXT:    sshll.4s v5, v5, #0
-; CHECK-NEXT:    and.16b v7, v7, v17
-; CHECK-NEXT:    and.16b v6, v16, v6
-; CHECK-NEXT:    and.16b v16, v18, v19
-; CHECK-NEXT:    and.16b v4, v4, v5
+; CHECK-NEXT:    and.16b v7, v17, v7
+; CHECK-NEXT:    and.16b v6, v6, v16
+; CHECK-NEXT:    and.16b v16, v19, v18
+; CHECK-NEXT:    and.16b v4, v5, v4
 ; CHECK-NEXT:    stp q6, q7, [x1, #32]
 ; CHECK-NEXT:    stp q4, q16, [x1], #64
 ; CHECK-NEXT:    b.ne LBB26_1
diff --git a/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll b/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
index 4d091c2302658..96f009a4da02d 100644
--- a/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
@@ -174,7 +174,7 @@ define <2 x i32> @ustest_f64i32(<2 x double> %x) {
 ; CHECK-NEXT:    cmp r2, #0
 ; CHECK-NEXT:    mvnne r2, #0
 ; CHECK-NEXT:    vmov.32 d18[0], r2
-; CHECK-NEXT:    vand q8, q8, q9
+; CHECK-NEXT:    vand q8, q9, q8
 ; CHECK-NEXT:    vmovn.i64 d0, q8
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    pop {r4, r5, r11, pc}
@@ -483,8 +483,8 @@ define <4 x i32> @ustest_f32i32(<4 x float> %x) {
 ; CHECK-NEXT:    vmov.32 d20[0], r7
 ; CHECK-NEXT:    mvnne r4, #0
 ; CHECK-NEXT:    vmov.32 d18[0], r4
-; CHECK-NEXT:    vand q10, q4, q10
-; CHECK-NEXT:    vand q8, q8, q9
+; CHECK-NEXT:    vand q10, q10, q4
+; CHECK-NEXT:    vand q8, q9, q8
 ; CHECK-NEXT:    vmovn.i64 d1, q10
 ; CHECK-NEXT:    vmovn.i64 d0, q8
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
@@ -995,8 +995,8 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-NEON-NEXT:    vmov.32 d20[0], r6
 ; CHECK-NEON-NEXT:    mvnne r7, #0
 ; CHECK-NEON-NEXT:    vmov.32 d18[0], r7
-; CHECK-NEON-NEXT:    vand q10, q4, q10
-; CHECK-NEON-NEXT:    vand q8, q8, q9
+; CHECK-NEON-NEXT:    vand q10, q10, q4
+; CHECK-NEON-NEXT:    vand q8, q9, q8
 ; CHECK-NEON-NEXT:    vmovn.i64 d1, q10
 ; CHECK-NEON-NEXT:    vmovn.i64 d0, q8
 ; CHECK-NEON-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
@@ -1097,8 +1097,8 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-FP16-NEXT:    vmov.32 d20[0], r7
 ; CHECK-FP16-NEXT:    mvnne r6, #0
 ; CHECK-FP16-NEXT:    vmov.32 d18[0], r6
-; CHECK-FP16-NEXT:    vand q10, q4, q10
-; CHECK-FP16-NEXT:    vand q8, q8, q9
+; CHECK-FP16-NEXT:    vand q10, q10, q4
+; CHECK-FP16-NEXT:    vand q8, q9, q8
 ; CHECK-FP16-NEXT:    vmovn.i64 d1, q10
 ; CHECK-FP16-NEXT:    vmovn.i64 d0, q8
 ; CHECK-FP16-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
diff --git a/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll b/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
index ec7516524ee67..6706d25ae01d2 100644
--- a/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
+++ b/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
@@ -1362,7 +1362,7 @@ define void @pr65820(ptr %y, <4 x float> %splat) {
 ; ARMV7-NEXT:    vmov d16, r2, r3
 ; ARMV7-NEXT:    vdup.32 q8, d16[0]
 ; ARMV7-NEXT:    vcgt.f32 q9, q8, #0
-; ARMV7-NEXT:    vand q8, q8, q9
+; ARMV7-NEXT:    vand q8, q9, q8
 ; ARMV7-NEXT:    vst1.32 {d16, d17}, [r0]
 ; ARMV7-NEXT:    bx lr
 ;
diff --git a/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll b/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll
index 5a861be95977d..c61b7841b95ac 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll
@@ -383,8 +383,7 @@ define i8 @xvmsk_eq_vsel_slt_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2)
 ; CHECK-LABEL: xvmsk_eq_vsel_slt_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvseq.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvrepli.b $xr1, -1
-; CHECK-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr2
 ; CHECK-NEXT:    xvmskltz.w $xr0, $xr0
 ; CHECK-NEXT:    xvpickve2gr.wu $a0, $xr0, 0
 ; CHECK-NEXT:    xvpickve2gr.wu $a1, $xr0, 4
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll
index 1c10e6c3087ad..4d2ddeb2889bb 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll
@@ -6,8 +6,8 @@ define void @select_v16i8_imm(ptr %res, ptr %a0) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a1, 0
 ; CHECK-NEXT:    vrepli.h $vr1, -256
-; CHECK-NEXT:    vbitseli.b $vr1, $vr0, 255
-; CHECK-NEXT:    vst $vr1, $a0, 0
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
   %v0 = load <16 x i8>, ptr %a0
   %sel = select <16 x i1> , <16 x i8> , <16 x i8> %v0
diff --git a/llvm/test/CodeGen/Mips/msa/compare_float.ll b/llvm/test/CodeGen/Mips/msa/compare_float.ll
index 178264581ea19..d0609c65d3206 100644
--- a/llvm/test/CodeGen/Mips/msa/compare_float.ll
+++ b/llvm/test/CodeGen/Mips/msa/compare_float.ll
@@ -525,26 +525,14 @@ define void @bsel_v2f64(ptr %d, ptr %a, ptr %b, ptr %c) nounwind {
 
 ; Note that IfSet and IfClr are swapped since the condition is inverted
 define void @bseli_v4f32(ptr %d, ptr %a, ptr %b, ptr %c) nounwind {
-; MIPS-LABEL: bseli_v4f32:
-; MIPS:       # %bb.0:
-; MIPS-NEXT:    ld.w $w0, 0($5)
-; MIPS-NEXT:    ld.w $w1, 0($6)
-; MIPS-NEXT:    fclt.w $w1, $w1, $w0
-; MIPS-NEXT:    ldi.b $w2, 0
-; MIPS-NEXT:    shf.b $w2, $w2, 27
-; MIPS-NEXT:    bsel.v $w1, $w2, $w0
-; MIPS-NEXT:    jr $ra
-; MIPS-NEXT:    st.w $w1, 0($4)
-;
-; MIPSEL-LABEL: bseli_v4f32:
-; MIPSEL:       # %bb.0:
-; MIPSEL-NEXT:    ld.w $w0, 0($5)
-; MIPSEL-NEXT:    ld.w $w1, 0($6)
-; MIPSEL-NEXT:    fclt.w $w1, $w1, $w0
-; MIPSEL-NEXT:    ldi.b $w2, 0
-; MIPSEL-NEXT:    bsel.v $w1, $w2, $w0
-; MIPSEL-NEXT:    jr $ra
-; MIPSEL-NEXT:    st.w $w1, 0($4)
+; CHECK-LABEL: bseli_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($5)
+; CHECK-NEXT:    ld.w $w1, 0($6)
+; CHECK-NEXT:    fclt.w $w1, $w1, $w0
+; CHECK-NEXT:    and.v $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
   %1 = load <4 x float>, ptr %a
   %2 = load <4 x float>, ptr %b
   %3 = fcmp ogt <4 x float> %1, %2
@@ -555,27 +543,14 @@ define void @bseli_v4f32(ptr %d, ptr %a, ptr %b, ptr %c) nounwind {
 
 ; Note that IfSet and IfClr are swapped since the condition is inverted
 define void @bseli_v2f64(ptr %d, ptr %a, ptr %b, ptr %c) nounwind {
-; MIPS-LABEL: bseli_v2f64:
-; MIPS:       # %bb.0:
-; MIPS-NEXT:    ld.d $w0, 0($5)
-; MIPS-NEXT:    ld.d $w1, 0($6)
-; MIPS-NEXT:    fclt.d $w1, $w1, $w0
-; MIPS-NEXT:    ldi.b $w2, 0
-; MIPS-NEXT:    shf.b $w2, $w2, 27
-; MIPS-NEXT:    shf.w $w2, $w2, 177
-; MIPS-NEXT:    bsel.v $w1, $w2, $w0
-; MIPS-NEXT:    jr $ra
-; MIPS-NEXT:    st.d $w1, 0($4)
-;
-; MIPSEL-LABEL: bseli_v2f64:
-; MIPSEL:       # %bb.0:
-; MIPSEL-NEXT:    ld.d $w0, 0($5)
-; MIPSEL-NEXT:    ld.d $w1, 0($6)
-; MIPSEL-NEXT:    fclt.d $w1, $w1, $w0
-; MIPSEL-NEXT:    ldi.b $w2, 0
-; MIPSEL-NEXT:    bsel.v $w1, $w2, $w0
-; MIPSEL-NEXT:    jr $ra
-; MIPSEL-NEXT:    st.d $w1, 0($4)
+; CHECK-LABEL: bseli_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($5)
+; CHECK-NEXT:    ld.d $w1, 0($6)
+; CHECK-NEXT:    fclt.d $w1, $w1, $w0
+; CHECK-NEXT:    and.v $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
   %1 = load <2 x double>, ptr %a
   %2 = load <2 x double>, ptr %b
   %3 = fcmp ogt <2 x double> %1, %2
@@ -643,3 +618,6 @@ define void @min_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   store <2 x double> %3, ptr %c
   ret void
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MIPS: {{.*}}
+; MIPSEL: {{.*}}
diff --git a/llvm/test/CodeGen/PowerPC/recipest.ll b/llvm/test/CodeGen/PowerPC/recipest.ll
index 96e2c6c3e081a..2598a410b8761 100644
--- a/llvm/test/CodeGen/PowerPC/recipest.ll
+++ b/llvm/test/CodeGen/PowerPC/recipest.ll
@@ -1024,15 +1024,16 @@ define <4 x float> @hoo3_fmf(<4 x float> %a) #1 {
 ; CHECK-P7-NEXT:    vslw 3, 3, 3
 ; CHECK-P7-NEXT:    lvx 0, 0, 3
 ; CHECK-P7-NEXT:    addis 3, 2, .LCPI25_1@toc@ha
-; CHECK-P7-NEXT:    addi 3, 3, .LCPI25_1@toc@l
 ; CHECK-P7-NEXT:    vmaddfp 5, 2, 4, 3
-; CHECK-P7-NEXT:    lvx 1, 0, 3
+; CHECK-P7-NEXT:    addi 3, 3, .LCPI25_1@toc@l
 ; CHECK-P7-NEXT:    vmaddfp 4, 5, 4, 0
-; CHECK-P7-NEXT:    vmaddfp 5, 5, 1, 3
+; CHECK-P7-NEXT:    lvx 0, 0, 3
+; CHECK-P7-NEXT:    vmaddfp 5, 5, 0, 3
 ; CHECK-P7-NEXT:    vmaddfp 3, 5, 4, 3
 ; CHECK-P7-NEXT:    vxor 4, 4, 4
 ; CHECK-P7-NEXT:    vcmpeqfp 2, 2, 4
-; CHECK-P7-NEXT:    vsel 2, 3, 4, 2
+; CHECK-P7-NEXT:    vnot 2, 2
+; CHECK-P7-NEXT:    vand 2, 2, 3
 ; CHECK-P7-NEXT:    blr
 ;
 ; CHECK-P8-LABEL: hoo3_fmf:
diff --git a/llvm/test/CodeGen/PowerPC/sat-add.ll b/llvm/test/CodeGen/PowerPC/sat-add.ll
index d9b22bda85e44..34b703a981105 100644
--- a/llvm/test/CodeGen/PowerPC/sat-add.ll
+++ b/llvm/test/CodeGen/PowerPC/sat-add.ll
@@ -536,12 +536,11 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_sum(<2 x i64> %x) {
 ; CHECK-LABEL: unsigned_sat_constant_v2i64_using_cmp_sum:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addis 3, 2, .LCPI34_0@toc@ha
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    addi 3, 3, .LCPI34_0@toc@l
 ; CHECK-NEXT:    lxvd2x 35, 0, 3
 ; CHECK-NEXT:    vaddudm 3, 2, 3
 ; CHECK-NEXT:    vcmpgtud 2, 2, 3
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %a = add <2 x i64> %x,
   %c = icmp ugt <2 x i64> %x, %a
@@ -553,7 +552,6 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_notval(<2 x i64> %x) {
 ; CHECK-LABEL: unsigned_sat_constant_v2i64_using_cmp_notval:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addis 3, 2, .LCPI35_0@toc@ha
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    addi 3, 3, .LCPI35_0@toc@l
 ; CHECK-NEXT:    lxvd2x 35, 0, 3
 ; CHECK-NEXT:    addis 3, 2, .LCPI35_1@toc@ha
@@ -561,7 +559,7 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_notval(<2 x i64> %x) {
 ; CHECK-NEXT:    lxvd2x 36, 0, 3
 ; CHECK-NEXT:    vaddudm 3, 2, 3
 ; CHECK-NEXT:    vcmpgtud 2, 2, 4
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %a = add <2 x i64> %x,
   %c = icmp ugt <2 x i64> %x,
@@ -599,9 +597,8 @@ define <16 x i8> @unsigned_sat_variable_v16i8_using_cmp_notval(<16 x i8> %x, <16
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlnor 36, 35, 35
 ; CHECK-NEXT:    vaddubm 3, 2, 3
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    vcmpgtub 2, 2, 4
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %noty = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %a = add <16 x i8> %x, %y
@@ -640,9 +637,8 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_cmp_notval(<8 x i16> %x, <8
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlnor 36, 35, 35
 ; CHECK-NEXT:    vadduhm 3, 2, 3
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    vcmpgtuh 2, 2, 4
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %noty = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %a = add <8 x i16> %x, %y
@@ -681,9 +677,8 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_notval(<4 x i32> %x, <4
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlnor 36, 35, 35
 ; CHECK-NEXT:    vadduwm 3, 2, 3
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    vcmpgtuw 2, 2, 4
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %noty = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
   %a = add <4 x i32> %x, %y
@@ -710,9 +705,8 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_sum(<2 x i64> %x, <2 x i
 ; CHECK-LABEL: unsigned_sat_variable_v2i64_using_cmp_sum:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddudm 3, 2, 3
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    vcmpgtud 2, 2, 3
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %a = add <2 x i64> %x, %y
   %c = icmp ugt <2 x i64> %x, %a
@@ -725,9 +719,8 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_notval(<2 x i64> %x, <2
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlnor 36, 35, 35
 ; CHECK-NEXT:    vaddudm 3, 2, 3
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    vcmpgtud 2, 2, 4
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %noty = xor <2 x i64> %y, <i64 -1, i64 -1>
   %a = add <2 x i64> %x, %y
diff --git a/llvm/test/CodeGen/SystemZ/vec-max-min-zerosplat.ll b/llvm/test/CodeGen/SystemZ/vec-max-min-zerosplat.ll
index 2125a0b8912b1..10f9f28e599f8 100644
--- a/llvm/test/CodeGen/SystemZ/vec-max-min-zerosplat.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-max-min-zerosplat.ll
@@ -52,8 +52,8 @@ define <2 x double> @f5(<2 x double> %val) {
 ; CHECK-LABEL: f5:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vgbm %v0, 0
-; CHECK-NEXT:    vfchedb %v1, %v0, %v24
-; CHECK-NEXT:    vsel %v24, %v0, %v24, %v1
+; CHECK-NEXT:    vfchedb %v0, %v0, %v24
+; CHECK-NEXT:    vnc %v24, %v24, %v0
 ; CHECK-NEXT:    br %r14
   %cmp = fcmp ugt <2 x double> %val, zeroinitializer
   %ret = select <2 x i1> %cmp, <2 x double> %val, <2 x double> zeroinitializer
@@ -64,8 +64,8 @@ define <2 x double> @f6(<2 x double> %val) {
 ; CHECK-LABEL: f6:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vgbm %v0, 0
-; CHECK-NEXT:    vfchedb %v1, %v24, %v0
-; CHECK-NEXT:    vsel %v24, %v0, %v24, %v1
+; CHECK-NEXT:    vfchedb %v0, %v24, %v0
+; CHECK-NEXT:    vnc %v24, %v24, %v0
 ; CHECK-NEXT:    br %r14
   %cmp = fcmp ult <2 x double> %val, zeroinitializer
   %ret = select <2 x i1> %cmp, <2 x double> %val, <2 x double> zeroinitializer
@@ -76,8 +76,8 @@ define <4 x float> @f7(<4 x float> %val) {
 ; CHECK-LABEL: f7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vgbm %v0, 0
-; CHECK-NEXT:    vfchesb %v1, %v0, %v24
-; CHECK-NEXT:    vsel %v24, %v0, %v24, %v1
+; CHECK-NEXT:    vfchesb %v0, %v0, %v24
+; CHECK-NEXT:    vnc %v24, %v24, %v0
 ; CHECK-NEXT:    br %r14
   %cmp = fcmp ugt <4 x float> %val, zeroinitializer
   %ret = select <4 x i1> %cmp, <4 x float> %val, <4 x float> zeroinitializer
@@ -88,8 +88,8 @@ define <4 x float> @f8(<4 x float> %val) {
 ; CHECK-LABEL: f8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vgbm %v0, 0
-; CHECK-NEXT:    vfchesb %v1, %v24, %v0
-; CHECK-NEXT:    vsel %v24, %v0, %v24, %v1
+; CHECK-NEXT:    vfchesb %v0, %v24, %v0
+; CHECK-NEXT:    vnc %v24, %v24, %v0
 ; CHECK-NEXT:    br %r14
   %cmp = fcmp ult <4 x float> %val, zeroinitializer
   %ret = select <4 x i1> %cmp, <4 x float> %val, <4 x float> zeroinitializer
diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
index 36094fe56d577..84856aab85079 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
@@ -77,11 +77,9 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: t1_all_odd_ne:
 ; CHECK-SSE2:       # %bb.0:
 ; CHECK-SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT:    retq
 ;
 ; CHECK-SSE41-LABEL: t1_all_odd_ne:
@@ -92,7 +90,7 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
 ; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
 ; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
 ; CHECK-SSE41-NEXT:    pxor %xmm1, %xmm0
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-SSE41-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: t1_all_odd_ne:
@@ -102,7 +100,7 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
 ; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; CHECK-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX2-LABEL: t1_all_odd_ne:
@@ -113,17 +111,16 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
 ; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; CHECK-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; CHECK-AVX2-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    retq
 ;
 ; CHECK-AVX512VL-LABEL: t1_all_odd_ne:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpternlogq {{.*#+}} xmm0 = ~xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpternlogq {{.*#+}} xmm0 = m64bcst | (xmm0 ^ xmm1)
 ; CHECK-AVX512VL-NEXT:    retq
   %urem = urem <4 x i32> %X,
   %cmp = icmp ne <4 x i32> %urem,
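Several of the updated tests above exercise the condition-inversion path of the new combine. A minimal IR sketch of that case (illustrative only, not part of the patch; the function name is hypothetical; assumes a one-use SETCC condition already promoted to the result type):

define <4 x i32> @invert_then_or_sketch(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x) {
  ; All-ones on the false side: the combine inverts slt to sge, swaps the
  ; select operands, then applies the vselect(mask, -1, x) -> (or mask, x) fold.
  %c = icmp slt <4 x i32> %a, %b
  %r = select <4 x i1> %c, <4 x i32> %x, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %r
}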