diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 886486eef0a15..a983ea9585226 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -3763,9 +3763,9 @@ bool RISCVAsmParser::validateInstruction(MCInst &Inst,
   if (!(MCID.TSFlags & RISCVII::ConstraintMask))
     return false;
 
-  if (Opcode == RISCV::VC_V_XVW || Opcode == RISCV::VC_V_IVW ||
-      Opcode == RISCV::VC_V_FVW || Opcode == RISCV::VC_V_VVW) {
-    // Operands Opcode, Dst, uimm, Dst, Rs2, Rs1 for VC_V_XVW.
+  if (Opcode == RISCV::SF_VC_V_XVW || Opcode == RISCV::SF_VC_V_IVW ||
+      Opcode == RISCV::SF_VC_V_FVW || Opcode == RISCV::SF_VC_V_VVW) {
+    // Operands Opcode, Dst, uimm, Dst, Rs2, Rs1 for SF_VC_V_XVW.
     MCRegister VCIXDst = Inst.getOperand(0).getReg();
     SMLoc VCIXDstLoc = Operands[2]->getStartLoc();
     if (MCID.TSFlags & RISCVII::VS1Constraint) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 1b748016a1928..6d9c3569550fb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -879,32 +879,32 @@ void RISCVDAGToDAGISel::selectSF_VC_X_SE(SDNode *Node) {
   auto *LMulSDNode = cast<ConstantSDNode>(Node->getOperand(7));
   switch (LMulSDNode->getSExtValue()) {
   case 5:
-    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF8
-                                                  : RISCV::PseudoVC_I_SE_MF8;
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_MF8
+                                                  : RISCV::PseudoSF_VC_I_SE_MF8;
     break;
   case 6:
-    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF4
-                                                  : RISCV::PseudoVC_I_SE_MF4;
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_MF4
+                                                  : RISCV::PseudoSF_VC_I_SE_MF4;
     break;
   case 7:
-    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF2
-                                                  : RISCV::PseudoVC_I_SE_MF2;
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_MF2
+                                                  : RISCV::PseudoSF_VC_I_SE_MF2;
     break;
   case 0:
-    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M1
-                                                  : RISCV::PseudoVC_I_SE_M1;
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M1
+                                                  : RISCV::PseudoSF_VC_I_SE_M1;
     break;
   case 1:
-    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M2
-                                                  : RISCV::PseudoVC_I_SE_M2;
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M2
+                                                  : RISCV::PseudoSF_VC_I_SE_M2;
     break;
   case 2:
-    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M4
-                                                  : RISCV::PseudoVC_I_SE_M4;
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M4
+                                                  : RISCV::PseudoSF_VC_I_SE_M4;
     break;
   case 3:
-    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M8
-                                                  : RISCV::PseudoVC_I_SE_M8;
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoSF_VC_X_SE_M8
+                                                  : RISCV::PseudoSF_VC_I_SE_M8;
     break;
   }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index a87674098a46b..6fe747800b5ad 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -160,10 +160,10 @@ multiclass CustomSiFiveVCIX {
   let vm = 1 in
-  defm VC_ # NAME : CustomSiFiveVCIXorVCIF;
+  defm SF_VC_ # NAME : CustomSiFiveVCIXorVCIF;
   let vm = 0 in
-  defm VC_V_ # NAME : CustomSiFiveVCIXorVCIF;
+  defm SF_VC_V_ # NAME : CustomSiFiveVCIXorVCIF;
 }
@@ -201,29 +201,29 @@ let Predicates = [HasVendorXSfvcp], mayLoad = 0, mayStore = 0,
 
 let Predicates = [HasVendorXSfvqmaccdod], DecoderNamespace = "XSfvector",
     DestEEW = EEWSEWx4, RVVConstraint=VS2Constraint in {
-  def VQMACCU_2x8x2 : CustomSiFiveVMACC<0b101100, OPMVV, "sf.vqmaccu.2x8x2">;
-  def VQMACC_2x8x2 : CustomSiFiveVMACC<0b101101, OPMVV, "sf.vqmacc.2x8x2">;
-  def VQMACCUS_2x8x2 : CustomSiFiveVMACC<0b101110, OPMVV, "sf.vqmaccus.2x8x2">;
-  def VQMACCSU_2x8x2 : CustomSiFiveVMACC<0b101111, OPMVV, "sf.vqmaccsu.2x8x2">;
+  def SF_VQMACCU_2x8x2 : CustomSiFiveVMACC<0b101100, OPMVV, "sf.vqmaccu.2x8x2">;
+  def SF_VQMACC_2x8x2 : CustomSiFiveVMACC<0b101101, OPMVV, "sf.vqmacc.2x8x2">;
+  def SF_VQMACCUS_2x8x2 : CustomSiFiveVMACC<0b101110, OPMVV, "sf.vqmaccus.2x8x2">;
+  def SF_VQMACCSU_2x8x2 : CustomSiFiveVMACC<0b101111, OPMVV, "sf.vqmaccsu.2x8x2">;
 }
 
 let Predicates = [HasVendorXSfvqmaccqoq], DecoderNamespace = "XSfvector",
     DestEEW = EEWSEWx4, RVVConstraint=WidenVNoMask in {
-  def VQMACCU_4x8x4 : CustomSiFiveVMACC<0b111100, OPMVV, "sf.vqmaccu.4x8x4">;
-  def VQMACC_4x8x4 : CustomSiFiveVMACC<0b111101, OPMVV, "sf.vqmacc.4x8x4">;
-  def VQMACCUS_4x8x4 : CustomSiFiveVMACC<0b111110, OPMVV, "sf.vqmaccus.4x8x4">;
-  def VQMACCSU_4x8x4 : CustomSiFiveVMACC<0b111111, OPMVV, "sf.vqmaccsu.4x8x4">;
+  def SF_VQMACCU_4x8x4 : CustomSiFiveVMACC<0b111100, OPMVV, "sf.vqmaccu.4x8x4">;
+  def SF_VQMACC_4x8x4 : CustomSiFiveVMACC<0b111101, OPMVV, "sf.vqmacc.4x8x4">;
+  def SF_VQMACCUS_4x8x4 : CustomSiFiveVMACC<0b111110, OPMVV, "sf.vqmaccus.4x8x4">;
+  def SF_VQMACCSU_4x8x4 : CustomSiFiveVMACC<0b111111, OPMVV, "sf.vqmaccsu.4x8x4">;
 }
 
 let Predicates = [HasVendorXSfvfwmaccqqq], DecoderNamespace = "XSfvector",
     DestEEW = EEWSEWx2, RVVConstraint=WidenVNoMask in {
-  def VFWMACC_4x4x4 : CustomSiFiveVMACC<0b111100, OPFVV, "sf.vfwmacc.4x4x4">;
+  def SF_VFWMACC_4x4x4 : CustomSiFiveVMACC<0b111100, OPFVV, "sf.vfwmacc.4x4x4">;
 }
 
 let Predicates = [HasVendorXSfvfnrclipxfqf], DecoderNamespace = "XSfvector",
    Uses = [FRM, VL, VTYPE] in {
-  def VFNRCLIP_XU_F_QF : CustomSiFiveVFNRCLIP<0b100010, OPFVF, "sf.vfnrclip.xu.f.qf">;
-  def VFNRCLIP_X_F_QF : CustomSiFiveVFNRCLIP<0b100011, OPFVF, "sf.vfnrclip.x.f.qf">;
+  def SF_VFNRCLIP_XU_F_QF : CustomSiFiveVFNRCLIP<0b100010, OPFVF, "sf.vfnrclip.xu.f.qf">;
+  def SF_VFNRCLIP_X_F_QF : CustomSiFiveVFNRCLIP<0b100011, OPFVF, "sf.vfnrclip.x.f.qf">;
 }
 
 class VPseudoVC_X :
@@ -306,14 +306,14 @@ multiclass VPseudoVC_X {
   let VLMul = m.value in {
     let Defs = [SF_VCIX_STATE], Uses = [SF_VCIX_STATE] in {
-      def "PseudoVC_" # NAME # "_SE_" # m.MX
+      def "PseudoSF_VC_" # NAME # "_SE_" # m.MX
          : VPseudoVC_X,
            Sched<[!cast<SchedWrite>("WriteVC_" # NAME # "_" # m.MX)]>;
-      def "PseudoVC_V_" # NAME # "_SE_" # m.MX
+      def "PseudoSF_VC_V_" # NAME # "_SE_" # m.MX
          : VPseudoVC_V_X,
            Sched<[!cast<SchedWrite>("WriteVC_V_" # NAME # "_" # m.MX)]>;
     }
-    def "PseudoVC_V_" # NAME # "_" # m.MX
+    def "PseudoSF_VC_V_" # NAME # "_" # m.MX
        : VPseudoVC_V_X,
          Sched<[!cast<SchedWrite>("WriteVC_V_" # NAME # "_" # m.MX)]>;
   }
@@ -323,14 +323,14 @@ multiclass VPseudoVC_XV {
   let VLMul = m.value in {
     let Defs = [SF_VCIX_STATE], Uses = [SF_VCIX_STATE] in {
-      def "PseudoVC_" # NAME # "_SE_" # m.MX
+      def "PseudoSF_VC_" # NAME # "_SE_" # m.MX
          : VPseudoVC_XV,
            Sched<[!cast<SchedWrite>("WriteVC_" # NAME # "_" # m.MX)]>;
-      def "PseudoVC_V_" # NAME # "_SE_" # m.MX
+      def "PseudoSF_VC_V_" # NAME # "_SE_" # m.MX
          : VPseudoVC_V_XV,
            Sched<[!cast<SchedWrite>("WriteVC_V_" # NAME # "_" # m.MX)]>;
     }
-    def "PseudoVC_V_" # NAME # "_" # m.MX
+    def "PseudoSF_VC_V_" # NAME # "_" # m.MX
        : VPseudoVC_V_XV,
          Sched<[!cast<SchedWrite>("WriteVC_V_" # NAME # "_" # m.MX)]>;
   }
@@ -340,14 +340,14 @@ multiclass VPseudoVC_XVV {
   let VLMul = m.value in {
     let Defs = [SF_VCIX_STATE], Uses = [SF_VCIX_STATE] in {
-      def "PseudoVC_" # NAME # "_SE_" # m.MX
+      def "PseudoSF_VC_" # NAME # "_SE_" # m.MX
          : VPseudoVC_XVV,
            Sched<[!cast<SchedWrite>("WriteVC_" # NAME # "_" # m.MX)]>;
-      def "PseudoVC_V_" # NAME # "_SE_" # m.MX
+      def "PseudoSF_VC_V_" # NAME # "_SE_" # m.MX
          : VPseudoVC_V_XVV,
            Sched<[!cast<SchedWrite>("WriteVC_V_" # NAME # "_" # m.MX)]>;
     }
-    def "PseudoVC_V_" # NAME # "_" # m.MX
+    def "PseudoSF_VC_V_" # NAME # "_" # m.MX
        : VPseudoVC_V_XVV,
          Sched<[!cast<SchedWrite>("WriteVC_V_" # NAME # "_" # m.MX)]>;
   }
@@ -357,15 +357,15 @@ multiclass VPseudoVC_XVW {
   let VLMul = m.value in {
     let Defs = [SF_VCIX_STATE], Uses = [SF_VCIX_STATE] in
-    def "PseudoVC_" # NAME # "_SE_" # m.MX
+    def "PseudoSF_VC_" # NAME # "_SE_" # m.MX
        : VPseudoVC_XVV,
         Sched<[!cast<SchedWrite>("WriteVC_" # NAME # "_" # m.MX)]>;
    let Constraints = "@earlyclobber $rd, $rd = $rs3" in {
     let Defs = [SF_VCIX_STATE], Uses = [SF_VCIX_STATE] in
-    def "PseudoVC_V_" # NAME # "_SE_" # m.MX
+    def "PseudoSF_VC_V_" # NAME # "_SE_" # m.MX
        : VPseudoVC_V_XVV,
         Sched<[!cast<SchedWrite>("WriteVC_V_" # NAME # "_" # m.MX)]>;
-    def "PseudoVC_V_" # NAME # "_" # m.MX
+    def "PseudoSF_VC_V_" # NAME # "_" # m.MX
        : VPseudoVC_V_XVV,
         Sched<[!cast<SchedWrite>("WriteVC_V_" # NAME # "_" # m.MX)]>;
    }
@@ -435,26 +435,26 @@ let Predicates = [HasVendorXSfvcp] in {
 }
 
 let Predicates = [HasVendorXSfvqmaccdod] in {
-  defm VQMACCU_2x8x2 : VPseudoSiFiveVQMACCDOD;
-  defm VQMACC_2x8x2 : VPseudoSiFiveVQMACCDOD;
-  defm VQMACCUS_2x8x2 : VPseudoSiFiveVQMACCDOD;
-  defm VQMACCSU_2x8x2 : VPseudoSiFiveVQMACCDOD;
+  defm SF_VQMACCU_2x8x2 : VPseudoSiFiveVQMACCDOD;
+  defm SF_VQMACC_2x8x2 : VPseudoSiFiveVQMACCDOD;
+  defm SF_VQMACCUS_2x8x2 : VPseudoSiFiveVQMACCDOD;
+  defm SF_VQMACCSU_2x8x2 : VPseudoSiFiveVQMACCDOD;
 }
 
 let Predicates = [HasVendorXSfvqmaccqoq] in {
-  defm VQMACCU_4x8x4 : VPseudoSiFiveVQMACCQOQ;
-  defm VQMACC_4x8x4 : VPseudoSiFiveVQMACCQOQ;
-  defm VQMACCUS_4x8x4 : VPseudoSiFiveVQMACCQOQ;
-  defm VQMACCSU_4x8x4 : VPseudoSiFiveVQMACCQOQ;
+  defm SF_VQMACCU_4x8x4 : VPseudoSiFiveVQMACCQOQ;
+  defm SF_VQMACC_4x8x4 : VPseudoSiFiveVQMACCQOQ;
+  defm SF_VQMACCUS_4x8x4 : VPseudoSiFiveVQMACCQOQ;
+  defm SF_VQMACCSU_4x8x4 : VPseudoSiFiveVQMACCQOQ;
 }
 
 let Predicates = [HasVendorXSfvfwmaccqqq] in {
-  defm VFWMACC_4x4x4 : VPseudoSiFiveVFWMACC;
+  defm SF_VFWMACC_4x4x4 : VPseudoSiFiveVFWMACC;
 }
 
 let Predicates = [HasVendorXSfvfnrclipxfqf] in {
-  defm VFNRCLIP_XU_F_QF : VPseudoSiFiveVFNRCLIP;
-  defm VFNRCLIP_X_F_QF : VPseudoSiFiveVFNRCLIP;
+  defm SF_VFNRCLIP_XU_F_QF : VPseudoSiFiveVFNRCLIP;
+  defm SF_VFNRCLIP_X_F_QF : VPseudoSiFiveVFNRCLIP;
 }
 
 // SDNode
@@ -660,11 +660,11 @@ class VPatVC_V_OP3 {
   def : VPatVC_V_OP3_ISD<!cast<SDPatternOperator>("sf_vc_v_" # intrinsic_suffix # "_se"),
-                         "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                         "PseudoSF_VC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
                          vti.Vector, XLenVT, type, vti.Log2SEW,
                          payload5, kind>;
   def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix,
-                     "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+                     "PseudoSF_VC_V_" # instruction_suffix # "_" # vti.LMul.MX,
                      vti.Vector, XLenVT, type, vti.Log2SEW,
                      payload5, kind>;
 }
@@ -673,15 +673,15 @@ multiclass VPatVC_XV {
   def : VPatVC_OP4_ISD<!cast<SDPatternOperator>("sf_vc_" # intrinsic_suffix # "_se"),
-                       "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                       "PseudoSF_VC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
                        XLenVT, vti.Vector, type, vti.Log2SEW, payload5,
                        vti.RegClass, kind, op1_kind>;
   def : VPatVC_V_OP3_ISD<!cast<SDPatternOperator>("sf_vc_v_" # intrinsic_suffix # "_se"),
-                         "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                         "PseudoSF_VC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
                          vti.Vector, vti.Vector, type, vti.Log2SEW,
                          vti.RegClass, kind, op1_kind>;
   def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix,
-                     "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+                     "PseudoSF_VC_V_" # instruction_suffix # "_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, type, vti.Log2SEW,
                      vti.RegClass, kind, op1_kind>;
 }
@@ -690,15 +690,15 @@ multiclass VPatVC_XVV {
   def : VPatVC_OP4_ISD<!cast<SDPatternOperator>("sf_vc_" # intrinsic_suffix # "_se"),
-                       "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                       "PseudoSF_VC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
                        wti.Vector, vti.Vector, type, vti.Log2SEW, wti.RegClass,
                        vti.RegClass, kind, op1_kind>;
   def : VPatVC_V_OP4_ISD<!cast<SDPatternOperator>("sf_vc_v_" # intrinsic_suffix # "_se"),
-                         "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                         "PseudoSF_VC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
                          wti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW,
                          wti.RegClass, vti.RegClass, kind, op1_kind>;
   def : VPatVC_V_OP4<"int_riscv_sf_vc_v_" # intrinsic_suffix,
-                     "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+                     "PseudoSF_VC_V_" # instruction_suffix # "_" # vti.LMul.MX,
                      wti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW,
                      wti.RegClass, vti.RegClass, kind, op1_kind>;
 }
@@ -810,26 +810,26 @@ let Predicates = [HasVendorXSfvcp] in {
 }
 
 let Predicates = [HasVendorXSfvqmaccdod] in {
-  defm : VPatVQMACCDOD<"vqmaccu_2x8x2", "VQMACCU", "2x8x2">;
-  defm : VPatVQMACCDOD<"vqmacc_2x8x2", "VQMACC", "2x8x2">;
-  defm : VPatVQMACCDOD<"vqmaccus_2x8x2", "VQMACCUS", "2x8x2">;
-  defm : VPatVQMACCDOD<"vqmaccsu_2x8x2", "VQMACCSU", "2x8x2">;
+  defm : VPatVQMACCDOD<"vqmaccu_2x8x2", "SF_VQMACCU", "2x8x2">;
+  defm : VPatVQMACCDOD<"vqmacc_2x8x2", "SF_VQMACC", "2x8x2">;
+  defm : VPatVQMACCDOD<"vqmaccus_2x8x2", "SF_VQMACCUS", "2x8x2">;
+  defm : VPatVQMACCDOD<"vqmaccsu_2x8x2", "SF_VQMACCSU", "2x8x2">;
 }
 
 let Predicates = [HasVendorXSfvqmaccqoq] in {
-  defm : VPatVQMACCQOQ<"vqmaccu_4x8x4", "VQMACCU", "4x8x4">;
-  defm : VPatVQMACCQOQ<"vqmacc_4x8x4", "VQMACC", "4x8x4">;
-  defm : VPatVQMACCQOQ<"vqmaccus_4x8x4", "VQMACCUS", "4x8x4">;
-  defm : VPatVQMACCQOQ<"vqmaccsu_4x8x4", "VQMACCSU", "4x8x4">;
+  defm : VPatVQMACCQOQ<"vqmaccu_4x8x4", "SF_VQMACCU", "4x8x4">;
+  defm : VPatVQMACCQOQ<"vqmacc_4x8x4", "SF_VQMACC", "4x8x4">;
+  defm : VPatVQMACCQOQ<"vqmaccus_4x8x4", "SF_VQMACCUS", "4x8x4">;
+  defm : VPatVQMACCQOQ<"vqmaccsu_4x8x4", "SF_VQMACCSU", "4x8x4">;
 }
 
 let Predicates = [HasVendorXSfvfwmaccqqq] in {
-  defm : VPatVFWMACC<"vfwmacc_4x4x4", "VFWMACC", "4x4x4">;
+  defm : VPatVFWMACC<"vfwmacc_4x4x4", "SF_VFWMACC", "4x4x4">;
 }
 
 let Predicates = [HasVendorXSfvfnrclipxfqf] in {
-  defm : VPatVFNRCLIP<"vfnrclip_xu_f_qf", "VFNRCLIP_XU_F_QF">;
-  defm : VPatVFNRCLIP<"vfnrclip_x_f_qf", "VFNRCLIP_X_F_QF">;
+  defm : VPatVFNRCLIP<"vfnrclip_xu_f_qf", "SF_VFNRCLIP_XU_F_QF">;
+  defm : VPatVFNRCLIP<"vfnrclip_x_f_qf", "SF_VFNRCLIP_X_F_QF">;
 }
 
 let Predicates = [HasVendorXSiFivecdiscarddlone] in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
index be73d4808937a..31e79e58f44c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
@@ -47,7 +47,7 @@ body: |
     %22:vr = PseudoVMSNE_VI_M1 %3, 0, 1, 6 /* e64 */
     %23:vmv0 = COPY %22
     %25:vrnov0 = PseudoVMERGE_VIM_M1 undef $noreg, %17, -1, %23, 1, 6 /* e64 */
-    %29:vr = PseudoVC_V_X_SE_M1 3, 31, %2, 1, 6 /* e64 */, implicit-def dead $sf_vcix_state, implicit $sf_vcix_state
+    %29:vr = PseudoSF_VC_V_X_SE_M1 3, 31, %2, 1, 6 /* e64 */, implicit-def dead $sf_vcix_state, implicit $sf_vcix_state
     %30:vr = PseudoVMV_V_I_M1 undef $noreg, 0, 1, 6 /* e64 */, 0
     BGEU %1, $x0, %bb.2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
index 2786f58826b91..ccabd5099071b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -290,10 +290,10 @@ body: |
     ; CHECK: liveins: $x2, $x10, $v8, $v13, $v4m4, $v16m4
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: $x0 = PseudoVSETVLI $x10, 66 /* e8, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
-   ; CHECK-NEXT: early-clobber $v4m4 = PseudoVQMACCUS_2x8x2_M4 renamable $v4m4, killed renamable $v13, killed renamable $v16m4, $noreg, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+   ; CHECK-NEXT: early-clobber $v4m4 = PseudoSF_VQMACCUS_2x8x2_M4 renamable $v4m4, killed renamable $v13, killed renamable $v16m4, $noreg, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v16m4 = PseudoVMV_V_V_M4 undef $v16m4, $v4m4, $noreg, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    $x0 = PseudoVSETVLI $x10, 66, implicit-def $vl, implicit-def $vtype
-   early-clobber $v4m4 = PseudoVQMACCUS_2x8x2_M4 renamable $v4m4, killed renamable $v13, killed renamable $v16m4, $noreg, 3, 1, implicit $vl, implicit $vtype
+   early-clobber $v4m4 = PseudoSF_VQMACCUS_2x8x2_M4 renamable $v4m4, killed renamable $v13, killed renamable $v16m4, $noreg, 3, 1, implicit $vl, implicit $vtype
    $v16m4 = COPY renamable $v4m4
 ...
 ---
@@ -306,10 +306,10 @@ body: |
    ; CHECK: liveins: $x2, $x10, $v8, $v13, $v4m4, $v16m2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: $x0 = PseudoVSETVLI $x10, 65 /* e8, m2, ta, mu */, implicit-def $vl, implicit-def $vtype
-   ; CHECK-NEXT: early-clobber $v4m4 = PseudoVQMACCUS_4x8x4_M2 renamable $v4m4, killed renamable $v13, killed renamable $v16m2, $noreg, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
+   ; CHECK-NEXT: early-clobber $v4m4 = PseudoSF_VQMACCUS_4x8x4_M2 renamable $v4m4, killed renamable $v13, killed renamable $v16m2, $noreg, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v16m4 = VMV4R_V $v4m4, implicit $vtype
    $x0 = PseudoVSETVLI $x10, 65, implicit-def $vl, implicit-def $vtype
-   early-clobber $v4m4 = PseudoVQMACCUS_4x8x4_M2 renamable $v4m4, killed renamable $v13, killed renamable $v16m2, $noreg, 3, 1, implicit $vl, implicit $vtype
+   early-clobber $v4m4 = PseudoSF_VQMACCUS_4x8x4_M2 renamable $v4m4, killed renamable $v13, killed renamable $v16m2, $noreg, 3, 1, implicit $vl, implicit $vtype
    $v16m4 = COPY renamable $v4m4
 ...
 ---
diff --git a/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp b/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp
index 0e6c1172ee9b5..15ae8d729a37e 100644
--- a/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp
+++ b/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp
@@ -374,8 +374,8 @@ TEST_P(RISCVInstrInfoTest, GetDestEEW) {
   EXPECT_EQ(RISCV::getDestLog2EEW(TII->get(RISCV::VMSEQ_VV), 4), 0u);
   EXPECT_EQ(RISCV::getDestLog2EEW(TII->get(RISCV::VMAND_MM), 0), 0u);
   EXPECT_EQ(RISCV::getDestLog2EEW(TII->get(RISCV::VIOTA_M), 3), 3u);
-  EXPECT_EQ(RISCV::getDestLog2EEW(TII->get(RISCV::VQMACCU_2x8x2), 3), 5u);
-  EXPECT_EQ(RISCV::getDestLog2EEW(TII->get(RISCV::VFWMACC_4x4x4), 4), 5u);
+  EXPECT_EQ(RISCV::getDestLog2EEW(TII->get(RISCV::SF_VQMACCU_2x8x2), 3), 5u);
+  EXPECT_EQ(RISCV::getDestLog2EEW(TII->get(RISCV::SF_VFWMACC_4x4x4), 4), 5u);
   EXPECT_EQ(RISCV::getDestLog2EEW(TII->get(RISCV::THVdotVMAQA_VV), 5), 5u);
 }