[RISCV] custom scmp(x,0) and scmp(0,x) lowering for RVV #151753

Open. Wants to merge 2 commits into main.
32 changes: 32 additions & 0 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -880,6 +880,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
Legal);

setOperationAction(ISD::SCMP, VT, Custom);
setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);

// Custom-lower extensions and truncations from/to mask types.
@@ -1361,6 +1362,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(
{ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);

setOperationAction(ISD::SCMP, VT, Custom);
setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);

// vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
@@ -8223,6 +8225,36 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::SADDSAT:
case ISD::SSUBSAT:
return lowerToScalableOp(Op, DAG);
case ISD::SCMP: {
SDLoc DL(Op);
EVT VT = Op->getValueType(0);
SDValue LHS = Op->getOperand(0);
SDValue RHS = Op->getOperand(1);
unsigned SEW = VT.getScalarSizeInBits();
EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

SDValue Shift = DAG.getConstant(SEW - 1, DL, VT);
SDValue Zero = DAG.getConstant(0, DL, VT);
SDValue One = DAG.getConstant(1, DL, VT);
SDValue MinusOne = DAG.getAllOnesConstant(DL, VT);

if (ISD::isConstantSplatVectorAllZeros(RHS.getNode())) {
// scmp(lhs, 0) -> vor.vv(vsra.vi/vx(lhs,SEW-1), vmin.vx(lhs,1))
LHS = DAG.getFreeze(LHS);
SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, LHS, Shift);
SDValue Min = DAG.getNode(ISD::SMIN, DL, VT, LHS, One);
return DAG.getNode(ISD::OR, DL, VT, Sra, Min);
}
if (ISD::isConstantSplatVectorAllZeros(LHS.getNode())) {
// scmp(0, rhs) -> vmerge.vi(vmsle.vi(rhs,0), vsrl.vi/vx(rhs,SEW-1), -1)
RHS = DAG.getFreeze(RHS);
SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, RHS, Shift);
SDValue Setcc = DAG.getSetCC(DL, CCVT, RHS, Zero, ISD::SETLE);
return DAG.getSelect(DL, VT, Setcc, Srl, MinusOne);
}

return SDValue();
}
case ISD::ABDS:
case ISD::ABDU: {
SDLoc dl(Op);
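For reference, the following is a minimal standalone C++ sketch, not part of the patch, that exhaustively checks for i8 the two scalar identities the comments in the lowering rely on. scmp_ref, the loop bounds, and the variable names are illustrative assumptions, not code from the patch.

// Scalar sanity check for the scmp(x, 0) and scmp(0, x) identities used above,
// assuming two's-complement arithmetic and an arithmetic right shift of
// negative signed values (guaranteed from C++20 onward).
#include <cassert>
#include <cstdint>

static int8_t scmp_ref(int8_t a, int8_t b) { return (a > b) - (a < b); }

int main() {
  for (int i = -128; i <= 127; ++i) {
    int8_t x = static_cast<int8_t>(i);

    // scmp(x, 0) -> (x >>s 7) | smin(x, 1):
    //   the arithmetic shift yields -1 for negative x and 0 otherwise;
    //   smin contributes 1 only when x > 0, so the OR gives -1 / 0 / 1.
    int8_t Sra = static_cast<int8_t>(x >> 7);        // vsra.vi
    int8_t Min = x < 1 ? x : static_cast<int8_t>(1); // vmin.vx
    assert(static_cast<int8_t>(Sra | Min) == scmp_ref(x, 0));

    // scmp(0, x) -> select(x <= 0, x >>u 7, -1):
    //   the logical shift of the sign bit gives 1 for negative x and 0 for
    //   zero; the select overrides the result with -1 when x > 0.
    int8_t Srl = static_cast<int8_t>(static_cast<uint8_t>(x) >> 7); // vsrl.vi
    int8_t Res = x <= 0 ? Srl : static_cast<int8_t>(-1);            // vmerge
    assert(Res == scmp_ref(0, x));
  }
  return 0;
}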
188 changes: 188 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scmp.ll
@@ -0,0 +1,188 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

define <16 x i8> @scmp_i8i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: scmp_i8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v9
; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
; CHECK-NEXT: ret
%c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> %b)
ret <16 x i8> %c
}

define <16 x i8> @scmp_z8i8(<16 x i8> %a) {
; CHECK-LABEL: scmp_z8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmsle.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsrl.vi v9, v8, 7, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%c = call <16 x i8> @llvm.scmp(<16 x i8> zeroinitializer, <16 x i8> %a)
ret <16 x i8> %c
}

define <16 x i8> @scmp_i8z8(<16 x i8> %a) {
; CHECK-LABEL: scmp_i8z8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 1
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmin.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 7
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
%c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> zeroinitializer)
ret <16 x i8> %c
}


define <8 x i16> @scmp_i16i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: scmp_i16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v9
; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
; CHECK-NEXT: ret
%c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> %b)
ret <8 x i16> %c
}

define <8 x i16> @scmp_z16i16(<8 x i16> %a) {
; CHECK-LABEL: scmp_z16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsrl.vi v9, v8, 15, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%c = call <8 x i16> @llvm.scmp(<8 x i16> zeroinitializer, <8 x i16> %a)
ret <8 x i16> %c
}

define <8 x i16> @scmp_i16z16(<8 x i16> %a) {
; CHECK-LABEL: scmp_i16z16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 1
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmin.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 15
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
%c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> zeroinitializer)
ret <8 x i16> %c
}


define <4 x i32> @scmp_i32i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: scmp_i32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v9
; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
; CHECK-NEXT: ret
%c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> %b)
ret <4 x i32> %c
}

define <4 x i32> @scmp_z32i32(<4 x i32> %a) {
; CHECK-LABEL: scmp_z32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsrl.vi v9, v8, 31, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%c = call <4 x i32> @llvm.scmp(<4 x i32> zeroinitializer, <4 x i32> %a)
ret <4 x i32> %c
}

define <4 x i32> @scmp_i32z32(<4 x i32> %a) {
; CHECK-LABEL: scmp_i32z32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmin.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 31
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
%c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> zeroinitializer)
ret <4 x i32> %c
}


define <2 x i64> @scmp_i64i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: scmp_i64i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmslt.vv v0, v8, v9
; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
; CHECK-NEXT: ret
%c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> %b)
ret <2 x i64> %c
}

define <2 x i64> @scmp_z64i64(<2 x i64> %a) {
; RV32-LABEL: scmp_z64i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vmsle.vi v0, v8, -1
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vmerge.vim v9, v9, 1, v0
; RV32-NEXT: vmsgt.vi v0, v8, 0
; RV32-NEXT: vmerge.vim v8, v9, -1, v0
; RV32-NEXT: ret
;
; RV64-LABEL: scmp_z64i64:
; RV64: # %bb.0:
; RV64-NEXT: li a0, 63
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vmsle.vi v0, v8, 0
; RV64-NEXT: vmv.v.i v9, -1
; RV64-NEXT: vsrl.vx v9, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v8, v9
; RV64-NEXT: ret
%c = call <2 x i64> @llvm.scmp(<2 x i64> zeroinitializer, <2 x i64> %a)
ret <2 x i64> %c
}

define <2 x i64> @scmp_i64z64(<2 x i64> %a) {
; RV32-LABEL: scmp_i64z64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vmsgt.vi v0, v8, 0
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vmerge.vim v9, v9, 1, v0
; RV32-NEXT: vmsle.vi v0, v8, -1
; RV32-NEXT: vmerge.vim v8, v9, -1, v0
; RV32-NEXT: ret
;
; RV64-LABEL: scmp_i64z64:
; RV64: # %bb.0:
; RV64-NEXT: li a0, 1
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vmin.vx v9, v8, a0
; RV64-NEXT: li a0, 63
; RV64-NEXT: vsra.vx v8, v8, a0
; RV64-NEXT: vor.vv v8, v8, v9
; RV64-NEXT: ret
%c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> zeroinitializer)
ret <2 x i64> %c
}