@@ -2870,14 +2870,10 @@ pub unsafe fn __msa_ceqi_d(a: v2i64, imm_s5: i32) -> v2i64 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(cfcmsa, imm5 = 0b11111))]
-#[rustc_args_required_const(0)]
-pub unsafe fn __msa_cfcmsa(imm5: i32) -> i32 {
-    macro_rules! call {
-        ($imm5:expr) => {
-            msa_cfcmsa($imm5)
-        };
-    }
-    constify_imm5!(imm5, call)
+#[rustc_legacy_const_generics(0)]
+pub unsafe fn __msa_cfcmsa<const IMM5: i32>() -> i32 {
+    static_assert_imm5!(IMM5);
+    msa_cfcmsa(IMM5)
 }

 /// Vector Compare Signed Less Than or Equal
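// Caller-side note (not part of this diff): a minimal sketch of how the new
// const-generic form of __msa_cfcmsa is invoked. The wrapper function name is
// hypothetical; only the intrinsic call itself comes from the change above.
// With #[rustc_legacy_const_generics(0)], the old positional call style,
// __msa_cfcmsa(0b11111), should also keep compiling for existing callers,
// since the attribute maps that argument onto the const parameter.
unsafe fn read_msa_control_register() -> i32 {
    // The 5-bit register index is now a compile-time constant, validated by
    // static_assert_imm5! rather than by the old constify_imm5! expansion.
    __msa_cfcmsa::<0b11111>()
}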
@@ -3578,14 +3574,10 @@ pub unsafe fn __msa_copy_u_d(a: v2i64, imm1: i32) -> u64 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(ctcmsa, imm1 = 0b1))]
-#[rustc_args_required_const(0)]
-pub unsafe fn __msa_ctcmsa(imm5: i32, a: i32) -> () {
-    macro_rules! call {
-        ($imm5:expr) => {
-            msa_ctcmsa($imm5, a)
-        };
-    }
-    constify_imm5!(imm5, call)
+#[rustc_legacy_const_generics(0)]
+pub unsafe fn __msa_ctcmsa<const IMM5: i32>(a: i32) -> () {
+    static_assert_imm5!(IMM5);
+    msa_ctcmsa(IMM5, a)
 }

 /// Vector Signed Divide
@@ -6026,14 +6018,10 @@ pub unsafe fn __msa_ld_d(mem_addr: *mut u8, imm_s13: i32) -> v2i64 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(ldi.b, imm_s10 = 0b1111111111))]
-#[rustc_args_required_const(0)]
-pub unsafe fn __msa_ldi_b(imm_s10: i32) -> v16i8 {
-    macro_rules! call {
-        ($imm_s10:expr) => {
-            msa_ldi_b($imm_s10)
-        };
-    }
-    constify_imm_s10!(imm_s10, call)
+#[rustc_legacy_const_generics(0)]
+pub unsafe fn __msa_ldi_b<const IMM_S10: i32>() -> v16i8 {
+    static_assert_imm_s10!(IMM_S10);
+    msa_ldi_b(IMM_S10)
 }

 /// Immediate Load
@@ -6045,14 +6033,10 @@ pub unsafe fn __msa_ldi_b(imm_s10: i32) -> v16i8 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(ldi.h, imm_s10 = 0b1111111111))]
-#[rustc_args_required_const(0)]
-pub unsafe fn __msa_ldi_h(imm_s10: i32) -> v8i16 {
-    macro_rules! call {
-        ($imm_s10:expr) => {
-            msa_ldi_h($imm_s10)
-        };
-    }
-    constify_imm_s10!(imm_s10, call)
+#[rustc_legacy_const_generics(0)]
+pub unsafe fn __msa_ldi_h<const IMM_S10: i32>() -> v8i16 {
+    static_assert_imm_s10!(IMM_S10);
+    msa_ldi_h(IMM_S10)
 }

 /// Immediate Load
@@ -6064,14 +6048,10 @@ pub unsafe fn __msa_ldi_h(imm_s10: i32) -> v8i16 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(ldi.w, imm_s10 = 0b1111111111))]
-#[rustc_args_required_const(0)]
-pub unsafe fn __msa_ldi_w(imm_s10: i32) -> v4i32 {
-    macro_rules! call {
-        ($imm_s10:expr) => {
-            msa_ldi_w($imm_s10)
-        };
-    }
-    constify_imm_s10!(imm_s10, call)
+#[rustc_legacy_const_generics(0)]
+pub unsafe fn __msa_ldi_w<const IMM_S10: i32>() -> v4i32 {
+    static_assert_imm_s10!(IMM_S10);
+    msa_ldi_w(IMM_S10)
 }

 /// Immediate Load
@@ -6083,14 +6063,10 @@ pub unsafe fn __msa_ldi_w(imm_s10: i32) -> v4i32 {
 #[inline]
 #[target_feature(enable = "msa")]
 #[cfg_attr(test, assert_instr(ldi.d, imm_s10 = 0b1111111111))]
-#[rustc_args_required_const(0)]
-pub unsafe fn __msa_ldi_d(imm_s10: i32) -> v2i64 {
-    macro_rules! call {
-        ($imm_s10:expr) => {
-            msa_ldi_d($imm_s10)
-        };
-    }
-    constify_imm_s10!(imm_s10, call)
+#[rustc_legacy_const_generics(0)]
+pub unsafe fn __msa_ldi_d<const IMM_S10: i32>() -> v2i64 {
+    static_assert_imm_s10!(IMM_S10);
+    msa_ldi_d(IMM_S10)
 }

 /// Vector Fixed-Point Multiply and Add
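// Caller-side note (not part of this diff): a sketch of one of the migrated
// ldi.* intrinsics in use; the wrapper name is hypothetical. The signed
// 10-bit immediate is now supplied as a const generic argument (braces used
// here for the negative literal), and static_assert_imm_s10! rejects values
// outside the signed 10-bit range at compile time instead of the old
// constify_imm_s10! match-based expansion.
unsafe fn splat_minus_one_per_byte() -> v16i8 {
    // ldi.b replicates the immediate into every 8-bit element of the result,
    // so -1 yields an all-ones vector (0b1111111111 in the assert_instr above).
    __msa_ldi_b::<{ -1 }>()
}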