1#![allow(non_camel_case_types)]
7#![allow(unused_imports)]
8
9use crate::{core_arch::simd, intrinsics::simd::*, marker::Sized, mem, ptr};
10
11#[cfg(test)]
12use stdarch_test::assert_instr;
13
types! {
    #![stable(feature = "wasm_simd", since = "1.54.0")]

    /// WASM-specific 128-bit wide SIMD vector type.
    ///
    /// This type is interpreted as sixteen 8-bit lanes, eight 16-bit lanes,
    /// four 32-bit lanes, or two 64-bit lanes depending on the intrinsic
    /// operating on it; the `4 x i32` declaration here only fixes the size.
    pub struct v128(4 x i32);
}
40
// Generates bit-preserving conversions between `v128` and the typed SIMD
// vectors in `crate::core_arch::simd`: `v128::$name(self) -> $ty` in one
// direction and `$ty::v128(self) -> v128` in the other. Both directions are
// `transmute`s, which is sound because every type involved is a 128-bit
// vector with no invalid bit patterns.
macro_rules! conversions {
    ($(($name:ident = $ty:ty))*) => {
        impl v128 {
            $(
                #[inline(always)]
                pub(crate) fn $name(self) -> $ty {
                    unsafe { mem::transmute(self) }
                }
            )*
        }
        $(
            impl $ty {
                #[inline(always)]
                // `const` so the `v128.const`-style constructors below can be
                // `const fn`s.
                pub(crate) const fn v128(self) -> v128 {
                    unsafe { mem::transmute(self) }
                }
            }
        )*
    }
}
61
// Instantiate the conversion helpers for every lane interpretation of `v128`.
conversions! {
    (as_u8x16 = simd::u8x16)
    (as_u16x8 = simd::u16x8)
    (as_u32x4 = simd::u32x4)
    (as_u64x2 = simd::u64x2)
    (as_i8x16 = simd::i8x16)
    (as_i16x8 = simd::i16x8)
    (as_i32x4 = simd::i32x4)
    (as_i64x2 = simd::i64x2)
    (as_f32x4 = simd::f32x4)
    (as_f64x2 = simd::f64x2)
}
74
75#[allow(improper_ctypes)]
76extern "C" {
77    #[link_name = "llvm.wasm.swizzle"]
78    fn llvm_swizzle(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
79
80    #[link_name = "llvm.wasm.bitselect.v16i8"]
81    fn llvm_bitselect(a: simd::i8x16, b: simd::i8x16, c: simd::i8x16) -> simd::i8x16;
82    #[link_name = "llvm.wasm.anytrue.v16i8"]
83    fn llvm_any_true_i8x16(x: simd::i8x16) -> i32;
84
85    #[link_name = "llvm.wasm.alltrue.v16i8"]
86    fn llvm_i8x16_all_true(x: simd::i8x16) -> i32;
87    #[link_name = "llvm.wasm.bitmask.v16i8"]
88    fn llvm_bitmask_i8x16(a: simd::i8x16) -> i32;
89    #[link_name = "llvm.wasm.narrow.signed.v16i8.v8i16"]
90    fn llvm_narrow_i8x16_s(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
91    #[link_name = "llvm.wasm.narrow.unsigned.v16i8.v8i16"]
92    fn llvm_narrow_i8x16_u(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
93    #[link_name = "llvm.wasm.sub.sat.signed.v16i8"]
94    fn llvm_i8x16_sub_sat_s(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
95    #[link_name = "llvm.wasm.sub.sat.unsigned.v16i8"]
96    fn llvm_i8x16_sub_sat_u(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
97    #[link_name = "llvm.wasm.avgr.unsigned.v16i8"]
98    fn llvm_avgr_u_i8x16(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
99
100    #[link_name = "llvm.wasm.extadd.pairwise.signed.v8i16"]
101    fn llvm_i16x8_extadd_pairwise_i8x16_s(x: simd::i8x16) -> simd::i16x8;
102    #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v8i16"]
103    fn llvm_i16x8_extadd_pairwise_i8x16_u(x: simd::i8x16) -> simd::i16x8;
104    #[link_name = "llvm.wasm.q15mulr.sat.signed"]
105    fn llvm_q15mulr(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
106    #[link_name = "llvm.wasm.alltrue.v8i16"]
107    fn llvm_i16x8_all_true(x: simd::i16x8) -> i32;
108    #[link_name = "llvm.wasm.bitmask.v8i16"]
109    fn llvm_bitmask_i16x8(a: simd::i16x8) -> i32;
110    #[link_name = "llvm.wasm.narrow.signed.v8i16.v4i32"]
111    fn llvm_narrow_i16x8_s(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
112    #[link_name = "llvm.wasm.narrow.unsigned.v8i16.v4i32"]
113    fn llvm_narrow_i16x8_u(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
114    #[link_name = "llvm.wasm.sub.sat.signed.v8i16"]
115    fn llvm_i16x8_sub_sat_s(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
116    #[link_name = "llvm.wasm.sub.sat.unsigned.v8i16"]
117    fn llvm_i16x8_sub_sat_u(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
118    #[link_name = "llvm.wasm.avgr.unsigned.v8i16"]
119    fn llvm_avgr_u_i16x8(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
120
121    #[link_name = "llvm.wasm.extadd.pairwise.signed.v16i8"]
122    fn llvm_i32x4_extadd_pairwise_i16x8_s(x: simd::i16x8) -> simd::i32x4;
123    #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v16i8"]
124    fn llvm_i32x4_extadd_pairwise_i16x8_u(x: simd::i16x8) -> simd::i32x4;
125    #[link_name = "llvm.wasm.alltrue.v4i32"]
126    fn llvm_i32x4_all_true(x: simd::i32x4) -> i32;
127    #[link_name = "llvm.wasm.bitmask.v4i32"]
128    fn llvm_bitmask_i32x4(a: simd::i32x4) -> i32;
129    #[link_name = "llvm.wasm.dot"]
130    fn llvm_i32x4_dot_i16x8_s(a: simd::i16x8, b: simd::i16x8) -> simd::i32x4;
131
132    #[link_name = "llvm.wasm.alltrue.v2i64"]
133    fn llvm_i64x2_all_true(x: simd::i64x2) -> i32;
134    #[link_name = "llvm.wasm.bitmask.v2i64"]
135    fn llvm_bitmask_i64x2(a: simd::i64x2) -> i32;
136
137    #[link_name = "llvm.nearbyint.v4f32"]
138    fn llvm_f32x4_nearest(x: simd::f32x4) -> simd::f32x4;
139    #[link_name = "llvm.minimum.v4f32"]
140    fn llvm_f32x4_min(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
141    #[link_name = "llvm.maximum.v4f32"]
142    fn llvm_f32x4_max(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
143
144    #[link_name = "llvm.nearbyint.v2f64"]
145    fn llvm_f64x2_nearest(x: simd::f64x2) -> simd::f64x2;
146    #[link_name = "llvm.minimum.v2f64"]
147    fn llvm_f64x2_min(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
148    #[link_name = "llvm.maximum.v2f64"]
149    fn llvm_f64x2_max(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
150
151    #[link_name = "llvm.fptosi.sat.v4i32.v4f32"]
152    fn llvm_i32x4_trunc_sat_f32x4_s(x: simd::f32x4) -> simd::i32x4;
153    #[link_name = "llvm.fptoui.sat.v4i32.v4f32"]
154    fn llvm_i32x4_trunc_sat_f32x4_u(x: simd::f32x4) -> simd::i32x4;
155    #[link_name = "llvm.fptosi.sat.v2i32.v2f64"]
156    fn llvm_i32x2_trunc_sat_f64x2_s(x: simd::f64x2) -> simd::i32x2;
157    #[link_name = "llvm.fptoui.sat.v2i32.v2f64"]
158    fn llvm_i32x2_trunc_sat_f64x2_u(x: simd::f64x2) -> simd::i32x2;
159}
160
/// Wrapper that drops the alignment requirement of `T` to 1 byte
/// (`#[repr(packed)]`), so that dereferencing a `*const Unaligned<T>` is a
/// valid unaligned load. Used to implement the `v128.load`/`v128.store`
/// family below.
#[repr(packed)]
#[derive(Copy)]
struct Unaligned<T>(T);

// Manual impl because `derive(Clone)` would create a reference to a packed
// field; copying the whole wrapper avoids that.
impl<T: Copy> Clone for Unaligned<T> {
    fn clone(&self) -> Unaligned<T> {
        *self
    }
}
170
/// Loads a `v128` vector from the given heap address.
///
/// This is an unaligned load: `m` does not need to be 16-byte aligned (the
/// read goes through the `#[repr(packed)]` `Unaligned` wrapper).
///
/// # Safety
///
/// `m` must point to 16 bytes of readable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.load))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load(m: *const v128) -> v128 {
    (*(m as *const Unaligned<v128>)).0
}
201
/// Loads eight 8-bit integers and sign-extends each one to a 16-bit lane.
///
/// The pointer does not need to be aligned (the read goes through the
/// `#[repr(packed)]` `Unaligned` wrapper).
///
/// # Safety
///
/// `m` must point to 8 bytes of readable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 {
    let m = *(m as *const Unaligned<simd::i8x8>);
    simd_cast::<_, simd::i16x8>(m.0).v128()
}

/// Loads eight 8-bit integers and zero-extends each one to a 16-bit lane.
///
/// The pointer does not need to be aligned.
///
/// # Safety
///
/// `m` must point to 8 bytes of readable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 {
    let m = *(m as *const Unaligned<simd::u8x8>);
    simd_cast::<_, simd::u16x8>(m.0).v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8;

/// Loads four 16-bit integers and sign-extends each one to a 32-bit lane.
///
/// The pointer does not need to be aligned.
///
/// # Safety
///
/// `m` must point to 8 bytes of readable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 {
    let m = *(m as *const Unaligned<simd::i16x4>);
    simd_cast::<_, simd::i32x4>(m.0).v128()
}

/// Loads four 16-bit integers and zero-extends each one to a 32-bit lane.
///
/// The pointer does not need to be aligned.
///
/// # Safety
///
/// `m` must point to 8 bytes of readable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 {
    let m = *(m as *const Unaligned<simd::u16x4>);
    simd_cast::<_, simd::u32x4>(m.0).v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4;

/// Loads two 32-bit integers and sign-extends each one to a 64-bit lane.
///
/// The pointer does not need to be aligned.
///
/// # Safety
///
/// `m` must point to 8 bytes of readable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32x2_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32x2_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 {
    let m = *(m as *const Unaligned<simd::i32x2>);
    simd_cast::<_, simd::i64x2>(m.0).v128()
}

/// Loads two 32-bit integers and zero-extends each one to a 64-bit lane.
///
/// The pointer does not need to be aligned.
///
/// # Safety
///
/// `m` must point to 8 bytes of readable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32x2_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32x2_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 {
    let m = *(m as *const Unaligned<simd::u32x2>);
    simd_cast::<_, simd::u64x2>(m.0).v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2;
318
/// Loads a single 8-bit element and splats it to all lanes of a `v128`.
///
/// # Safety
///
/// `m` must point to 1 byte of readable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load8_splat(m: *const u8) -> v128 {
    u8x16_splat(*m)
}

/// Loads a single 16-bit element (unaligned read) and splats it to all lanes.
///
/// # Safety
///
/// `m` must point to 2 bytes of readable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load16_splat(m: *const u16) -> v128 {
    u16x8_splat(ptr::read_unaligned(m))
}

/// Loads a single 32-bit element (unaligned read) and splats it to all lanes.
///
/// # Safety
///
/// `m` must point to 4 bytes of readable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_splat(m: *const u32) -> v128 {
    u32x4_splat(ptr::read_unaligned(m))
}

/// Loads a single 64-bit element (unaligned read) and splats it to both lanes.
///
/// # Safety
///
/// `m` must point to 8 bytes of readable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_splat(m: *const u64) -> v128 {
    u64x2_splat(ptr::read_unaligned(m))
}

/// Loads a 32-bit element into the low lane of a vector, setting all other
/// lanes to zero.
///
/// # Safety
///
/// `m` must point to 4 bytes of readable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_zero(m: *const u32) -> v128 {
    u32x4(ptr::read_unaligned(m), 0, 0, 0)
}

/// Loads a 64-bit element into the low lane of a vector, setting the other
/// lane to zero.
///
/// # Safety
///
/// `m` must point to 8 bytes of readable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_zero(m: *const u64) -> v128 {
    u64x2_replace_lane::<0>(u64x2(0, 0), ptr::read_unaligned(m))
}
444
/// Stores a `v128` vector to the given heap address.
///
/// This is an unaligned store: `m` does not need to be 16-byte aligned (the
/// write goes through the `#[repr(packed)]` `Unaligned` wrapper).
///
/// # Safety
///
/// `m` must point to 16 bytes of writable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.store))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store(m: *mut v128, a: v128) {
    *(m as *mut Unaligned<v128>) = Unaligned(a);
}
475
/// Loads an 8-bit value from `m` and returns `v` with lane `L` replaced by
/// that value.
///
/// # Safety
///
/// `m` must point to 1 byte of readable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 {
    // Lane-bound checking (`L < 16`) is enforced by `u8x16_replace_lane`.
    u8x16_replace_lane::<L>(v, *m)
}

/// Loads a 16-bit value from `m` (unaligned read) and returns `v` with lane
/// `L` replaced by that value.
///
/// # Safety
///
/// `m` must point to 2 bytes of readable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 {
    u16x8_replace_lane::<L>(v, ptr::read_unaligned(m))
}

/// Loads a 32-bit value from `m` (unaligned read) and returns `v` with lane
/// `L` replaced by that value.
///
/// # Safety
///
/// `m` must point to 4 bytes of readable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 {
    u32x4_replace_lane::<L>(v, ptr::read_unaligned(m))
}

/// Loads a 64-bit value from `m` (unaligned read) and returns `v` with lane
/// `L` replaced by that value.
///
/// # Safety
///
/// `m` must point to 8 bytes of readable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 {
    u64x2_replace_lane::<L>(v, ptr::read_unaligned(m))
}

/// Stores the 8-bit value of lane `L` of `v` to `m`.
///
/// # Safety
///
/// `m` must point to 1 byte of writable memory.
#[inline]
#[cfg_attr(test, assert_instr(v128.store8_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store8_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) {
    *m = u8x16_extract_lane::<L>(v);
}

/// Stores the 16-bit value of lane `L` of `v` to `m` (unaligned write).
///
/// # Safety
///
/// `m` must point to 2 bytes of writable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.store16_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store16_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) {
    ptr::write_unaligned(m, u16x8_extract_lane::<L>(v))
}

/// Stores the 32-bit value of lane `L` of `v` to `m` (unaligned write).
///
/// # Safety
///
/// `m` must point to 4 bytes of writable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.store32_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store32_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) {
    ptr::write_unaligned(m, u32x4_extract_lane::<L>(v))
}

/// Stores the 64-bit value of lane `L` of `v` to `m` (unaligned write).
///
/// # Safety
///
/// `m` must point to 8 bytes of writable memory; it does not need to be
/// aligned.
#[inline]
#[cfg_attr(test, assert_instr(v128.store64_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store64_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) {
    ptr::write_unaligned(m, u64x2_extract_lane::<L>(v))
}
635
/// Materializes a SIMD value from the provided sixteen 8-bit operands.
///
/// When all operands are constant this lowers to a single `v128.const`
/// instruction.
#[inline]
#[cfg_attr(
    test,
    assert_instr(
        v128.const,
        a0 = 0,
        a1 = 1,
        a2 = 2,
        a3 = 3,
        a4 = 4,
        a5 = 5,
        a6 = 6,
        a7 = 7,
        a8 = 8,
        a9 = 9,
        a10 = 10,
        a11 = 11,
        a12 = 12,
        a13 = 13,
        a14 = 14,
        a15 = 15,
    )
)]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i8x16(
    a0: i8,
    a1: i8,
    a2: i8,
    a3: i8,
    a4: i8,
    a5: i8,
    a6: i8,
    a7: i8,
    a8: i8,
    a9: i8,
    a10: i8,
    a11: i8,
    a12: i8,
    a13: i8,
    a14: i8,
    a15: i8,
) -> v128 {
    simd::i8x16::new(
        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
    )
    .v128()
}
690
/// Materializes a SIMD value from the provided sixteen 8-bit operands.
///
/// Unsigned counterpart of [`i8x16`]; the lanes hold the same bits.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u8x16(
    a0: u8,
    a1: u8,
    a2: u8,
    a3: u8,
    a4: u8,
    a5: u8,
    a6: u8,
    a7: u8,
    a8: u8,
    a9: u8,
    a10: u8,
    a11: u8,
    a12: u8,
    a13: u8,
    a14: u8,
    a15: u8,
) -> v128 {
    simd::u8x16::new(
        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
    )
    .v128()
}
723
/// Materializes a SIMD value from the provided eight 16-bit operands.
///
/// When all operands are constant this lowers to a single `v128.const`
/// instruction.
#[inline]
#[cfg_attr(
    test,
    assert_instr(
        v128.const,
        a0 = 0,
        a1 = 1,
        a2 = 2,
        a3 = 3,
        a4 = 4,
        a5 = 5,
        a6 = 6,
        a7 = 7,
    )
)]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16, a7: i16) -> v128 {
    simd::i16x8::new(a0, a1, a2, a3, a4, a5, a6, a7).v128()
}

/// Materializes a SIMD value from the provided eight 16-bit operands.
///
/// Unsigned counterpart of [`i16x8`]; the lanes hold the same bits.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16, a7: u16) -> v128 {
    simd::u16x8::new(a0, a1, a2, a3, a4, a5, a6, a7).v128()
}
763
/// Materializes a SIMD value from the provided four 32-bit operands.
///
/// When all operands are constant this lowers to a single `v128.const`
/// instruction.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 {
    simd::i32x4::new(a0, a1, a2, a3).v128()
}

/// Materializes a SIMD value from the provided four 32-bit operands.
///
/// Unsigned counterpart of [`i32x4`]; the lanes hold the same bits.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 {
    simd::u32x4::new(a0, a1, a2, a3).v128()
}

/// Materializes a SIMD value from the provided two 64-bit operands.
///
/// When all operands are constant this lowers to a single `v128.const`
/// instruction.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 1, a1 = 2))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i64x2(a0: i64, a1: i64) -> v128 {
    simd::i64x2::new(a0, a1).v128()
}

/// Materializes a SIMD value from the provided two 64-bit operands.
///
/// Unsigned counterpart of [`i64x2`]; the lanes hold the same bits.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u64x2(a0: u64, a1: u64) -> v128 {
    simd::u64x2::new(a0, a1).v128()
}
817
/// Materializes a SIMD value from the provided four 32-bit float operands.
///
/// When all operands are constant this lowers to a single `v128.const`
/// instruction. Note the later `wasm_simd_const` const-stability: the float
/// constructors became `const fn` in 1.56.0, after the integer ones.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
#[target_feature(enable = "simd128")]
pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 {
    simd::f32x4::new(a0, a1, a2, a3).v128()
}

/// Materializes a SIMD value from the provided two 64-bit float operands.
///
/// When all operands are constant this lowers to a single `v128.const`
/// instruction.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
#[target_feature(enable = "simd128")]
pub const fn f64x2(a0: f64, a1: f64) -> v128 {
    simd::f64x2::new(a0, a1).v128()
}
845
/// Returns a new vector with lanes selected from the lanes of the two input
/// vectors `a` and `b`, specified by the sixteen compile-time lane indices.
///
/// Indices `0..16` select the corresponding lane of `a`; indices `16..32`
/// select lane `I - 16` of `b`. All indices must be below 32, enforced at
/// compile time by `static_assert!`.
#[inline]
#[cfg_attr(test,
    assert_instr(
        i8x16.shuffle,
        I0 = 0,
        I1 = 2,
        I2 = 4,
        I3 = 6,
        I4 = 8,
        I5 = 10,
        I6 = 12,
        I7 = 14,
        I8 = 16,
        I9 = 18,
        I10 = 20,
        I11 = 22,
        I12 = 24,
        I13 = 26,
        I14 = 28,
        I15 = 30,
    )
)]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shuffle<
    const I0: usize,
    const I1: usize,
    const I2: usize,
    const I3: usize,
    const I4: usize,
    const I5: usize,
    const I6: usize,
    const I7: usize,
    const I8: usize,
    const I9: usize,
    const I10: usize,
    const I11: usize,
    const I12: usize,
    const I13: usize,
    const I14: usize,
    const I15: usize,
>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 32);
    static_assert!(I1 < 32);
    static_assert!(I2 < 32);
    static_assert!(I3 < 32);
    static_assert!(I4 < 32);
    static_assert!(I5 < 32);
    static_assert!(I6 < 32);
    static_assert!(I7 < 32);
    static_assert!(I8 < 32);
    static_assert!(I9 < 32);
    static_assert!(I10 < 32);
    static_assert!(I11 < 32);
    static_assert!(I12 < 32);
    static_assert!(I13 < 32);
    static_assert!(I14 < 32);
    static_assert!(I15 < 32);
    let shuf: simd::u8x16 = unsafe {
        simd_shuffle!(
            a.as_u8x16(),
            b.as_u8x16(),
            [
                I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
                I7 as u32, I8 as u32, I9 as u32, I10 as u32, I11 as u32, I12 as u32, I13 as u32,
                I14 as u32, I15 as u32,
            ],
        )
    };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_shuffle as u8x16_shuffle;
938
/// Same as [`i8x16_shuffle`], except this selects among the 16-bit lanes of
/// the two inputs: indices `0..8` pick a lane of `a`, `8..16` pick lane
/// `I - 8` of `b`. All indices must be below 16 (enforced at compile time).
#[inline]
#[cfg_attr(test,
    assert_instr(
        i8x16.shuffle,
        I0 = 0,
        I1 = 2,
        I2 = 4,
        I3 = 6,
        I4 = 8,
        I5 = 10,
        I6 = 12,
        I7 = 14,
    )
)]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shuffle<
    const I0: usize,
    const I1: usize,
    const I2: usize,
    const I3: usize,
    const I4: usize,
    const I5: usize,
    const I6: usize,
    const I7: usize,
>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 16);
    static_assert!(I1 < 16);
    static_assert!(I2 < 16);
    static_assert!(I3 < 16);
    static_assert!(I4 < 16);
    static_assert!(I5 < 16);
    static_assert!(I6 < 16);
    static_assert!(I7 < 16);
    let shuf: simd::u16x8 = unsafe {
        simd_shuffle!(
            a.as_u16x8(),
            b.as_u16x8(),
            [
                I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
                I7 as u32,
            ],
        )
    };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_shuffle as u16x8_shuffle;
999
/// Same as [`i8x16_shuffle`], except this selects among the 32-bit lanes of
/// the two inputs: indices `0..4` pick a lane of `a`, `4..8` pick lane
/// `I - 4` of `b`. All indices must be below 8 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2, I2 = 4, I3 = 6))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 8);
    static_assert!(I1 < 8);
    static_assert!(I2 < 8);
    static_assert!(I3 < 8);
    let shuf: simd::u32x4 = unsafe {
        simd_shuffle!(
            a.as_u32x4(),
            b.as_u32x4(),
            [I0 as u32, I1 as u32, I2 as u32, I3 as u32],
        )
    };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_shuffle as u32x4_shuffle;

/// Same as [`i8x16_shuffle`], except this selects among the 64-bit lanes of
/// the two inputs: indices `0..2` pick a lane of `a`, `2..4` pick lane
/// `I - 2` of `b`. All indices must be below 4 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 {
    static_assert!(I0 < 4);
    static_assert!(I1 < 4);
    let shuf: simd::u64x2 =
        unsafe { simd_shuffle!(a.as_u64x2(), b.as_u64x2(), [I0 as u32, I1 as u32]) };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_shuffle as u64x2_shuffle;
1055
/// Extracts lane `N` of `a` as a signed 8-bit integer.
///
/// `N` must be below 16 (enforced at compile time by `static_assert!`).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.extract_lane_s, N = 3))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
    static_assert!(N < 16);
    unsafe { simd_extract!(a.as_i8x16(), N as u32) }
}

/// Extracts lane `N` of `a` as an unsigned 8-bit integer.
///
/// `N` must be below 16 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.extract_lane_u, N = 3))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 {
    static_assert!(N < 16);
    unsafe { simd_extract!(a.as_u8x16(), N as u32) }
}

/// Returns `a` with 8-bit lane `N` replaced by `val`.
///
/// `N` must be below 16 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
    static_assert!(N < 16);
    unsafe { simd_insert!(a.as_i8x16(), N as u32, val).v128() }
}

/// Returns `a` with 8-bit lane `N` replaced by `val` (unsigned variant).
///
/// `N` must be below 16 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 {
    static_assert!(N < 16);
    unsafe { simd_insert!(a.as_u8x16(), N as u32, val).v128() }
}
1111
/// Extracts lane `N` of `a` as a signed 16-bit integer.
///
/// `N` must be below 8 (enforced at compile time by `static_assert!`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extract_lane_s, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
    static_assert!(N < 8);
    unsafe { simd_extract!(a.as_i16x8(), N as u32) }
}

/// Extracts lane `N` of `a` as an unsigned 16-bit integer.
///
/// `N` must be below 8 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extract_lane_u, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 {
    static_assert!(N < 8);
    unsafe { simd_extract!(a.as_u16x8(), N as u32) }
}

/// Returns `a` with 16-bit lane `N` replaced by `val`.
///
/// `N` must be below 8 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
    static_assert!(N < 8);
    unsafe { simd_insert!(a.as_i16x8(), N as u32, val).v128() }
}

/// Returns `a` with 16-bit lane `N` replaced by `val` (unsigned variant).
///
/// `N` must be below 8 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 {
    static_assert!(N < 8);
    unsafe { simd_insert!(a.as_u16x8(), N as u32, val).v128() }
}
1167
/// Extracts lane `N` of `a` as a signed 32-bit integer.
///
/// `N` must be below 4 (enforced at compile time by `static_assert!`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extract_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
    static_assert!(N < 4);
    unsafe { simd_extract!(a.as_i32x4(), N as u32) }
}

/// Extracts lane `N` of `a` as an unsigned 32-bit integer.
///
/// Bit-identical to [`i32x4_extract_lane`] (the lane bits are just
/// reinterpreted as unsigned); the lane bound is checked by the delegate.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 {
    i32x4_extract_lane::<N>(a) as u32
}

/// Returns `a` with 32-bit lane `N` replaced by `val`.
///
/// `N` must be below 4 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
    static_assert!(N < 4);
    unsafe { simd_insert!(a.as_i32x4(), N as u32, val).v128() }
}

/// Returns `a` with 32-bit lane `N` replaced by `val` (unsigned variant,
/// bit-identical to [`i32x4_replace_lane`]).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 {
    i32x4_replace_lane::<N>(a, val as i32)
}
1219
/// Extracts lane `N` of `a` as a signed 64-bit integer.
///
/// `N` must be below 2 (enforced at compile time by `static_assert!`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
    static_assert!(N < 2);
    unsafe { simd_extract!(a.as_i64x2(), N as u32) }
}

/// Extracts lane `N` of `a` as an unsigned 64-bit integer.
///
/// Bit-identical to [`i64x2_extract_lane`]; the lane bound is checked by the
/// delegate.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 {
    i64x2_extract_lane::<N>(a) as u64
}

/// Returns `a` with 64-bit lane `N` replaced by `val`.
///
/// `N` must be below 2 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.replace_lane, N = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
    static_assert!(N < 2);
    unsafe { simd_insert!(a.as_i64x2(), N as u32, val).v128() }
}

/// Returns `a` with 64-bit lane `N` replaced by `val` (unsigned variant,
/// bit-identical to [`i64x2_replace_lane`]).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 {
    i64x2_replace_lane::<N>(a, val as i64)
}
1271
/// Extracts 32-bit float lane `N` of `a`.
///
/// `N` must be below 4 (enforced at compile time by `static_assert!`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
    static_assert!(N < 4);
    unsafe { simd_extract!(a.as_f32x4(), N as u32) }
}

/// Returns `a` with 32-bit float lane `N` replaced by `val`.
///
/// `N` must be below 4 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.replace_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
    static_assert!(N < 4);
    unsafe { simd_insert!(a.as_f32x4(), N as u32, val).v128() }
}

/// Extracts 64-bit float lane `N` of `a`.
///
/// `N` must be below 2 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
    static_assert!(N < 2);
    unsafe { simd_extract!(a.as_f64x2(), N as u32) }
}

/// Returns `a` with 64-bit float lane `N` replaced by `val`.
///
/// `N` must be below 2 (enforced at compile time).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.replace_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 {
    static_assert!(N < 2);
    unsafe { simd_insert!(a.as_f64x2(), N as u32, val).v128() }
}
1327
/// Returns a new vector where each byte lane is the byte of `a` selected at
/// runtime by the corresponding lane of the index vector `s`.
///
/// Per the WebAssembly `i8x16.swizzle` semantics, index lanes of `s` that are
/// 16 or larger select 0 for that output lane (implemented here by the
/// `llvm.wasm.swizzle` intrinsic).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.swizzle))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.swizzle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_swizzle(a: v128, s: v128) -> v128 {
    unsafe { llvm_swizzle(a.as_i8x16(), s.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_swizzle as u8x16_swizzle;
1344
/// Creates a vector with identical 8-bit lanes, all equal to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_splat(a: i8) -> v128 {
    simd::i8x16::splat(a).v128()
}

/// Creates a vector with identical 8-bit lanes, all equal to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_splat(a: u8) -> v128 {
    simd::u8x16::splat(a).v128()
}

/// Creates a vector with identical 16-bit lanes, all equal to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_splat(a: i16) -> v128 {
    simd::i16x8::splat(a).v128()
}

/// Creates a vector with identical 16-bit lanes, all equal to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_splat(a: u16) -> v128 {
    simd::u16x8::splat(a).v128()
}

/// Creates a vector with identical 32-bit lanes, all equal to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_splat(a: i32) -> v128 {
    simd::i32x4::splat(a).v128()
}

/// Creates a vector with identical 32-bit lanes, all equal to `a`
/// (bit-identical delegation to [`i32x4_splat`]).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_splat(a: u32) -> v128 {
    i32x4_splat(a as i32)
}

/// Creates a vector with identical 64-bit lanes, both equal to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_splat(a: i64) -> v128 {
    simd::i64x2::splat(a).v128()
}
1427
1428#[inline]
1432#[target_feature(enable = "simd128")]
1433#[doc(alias("u64x2.splat"))]
1434#[stable(feature = "wasm_simd", since = "1.54.0")]
1435pub fn u64x2_splat(a: u64) -> v128 {
1436    i64x2_splat(a as i64)
1437}
1438
1439#[inline]
1443#[cfg_attr(test, assert_instr(f32x4.splat))]
1444#[target_feature(enable = "simd128")]
1445#[doc(alias("f32x4.splat"))]
1446#[stable(feature = "wasm_simd", since = "1.54.0")]
1447pub fn f32x4_splat(a: f32) -> v128 {
1448    simd::f32x4::splat(a).v128()
1449}
1450
1451#[inline]
1455#[cfg_attr(test, assert_instr(f64x2.splat))]
1456#[target_feature(enable = "simd128")]
1457#[doc(alias("f64x2.splat"))]
1458#[stable(feature = "wasm_simd", since = "1.54.0")]
1459pub fn f64x2_splat(a: f64) -> v128 {
1460    simd::f64x2::splat(a).v128()
1461}
1462
/// Lane-wise equality of two vectors of sixteen 8-bit integers; each result
/// lane is all ones where the input lanes were equal and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_eq(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_eq::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise inequality of two vectors of sixteen 8-bit integers; each result
/// lane is all ones where the input lanes differed and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_ne(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ne::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

// Equality is signedness-agnostic, so the unsigned names are re-exports.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_eq as u8x16_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_ne as u8x16_ne;

/// Lane-wise `a < b` on sixteen 8-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_lt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_lt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise `a < b` on sixteen 8-bit *unsigned* integers; each result lane
/// is all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_lt(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare; the mask is merely
    // reinterpreted as `i8x16` for the return type.
    unsafe { simd_lt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Lane-wise `a > b` on sixteen 8-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_gt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_gt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise `a > b` on sixteen 8-bit *unsigned* integers; each result lane
/// is all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_gt(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_gt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Lane-wise `a <= b` on sixteen 8-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_le(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_le::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise `a <= b` on sixteen 8-bit *unsigned* integers; each result lane
/// is all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_le(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_le::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Lane-wise `a >= b` on sixteen 8-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_ge(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ge::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise `a >= b` on sixteen 8-bit *unsigned* integers; each result lane
/// is all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_ge(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_ge::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}
1607
/// Lane-wise equality of two vectors of eight 16-bit integers; each result
/// lane is all ones where the input lanes were equal and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_eq(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_eq::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise inequality of two vectors of eight 16-bit integers; each result
/// lane is all ones where the input lanes differed and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_ne(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ne::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

// Equality is signedness-agnostic, so the unsigned names are re-exports.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_eq as u16x8_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_ne as u16x8_ne;

/// Lane-wise `a < b` on eight 16-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_lt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_lt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise `a < b` on eight 16-bit *unsigned* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_lt(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_lt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Lane-wise `a > b` on eight 16-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_gt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_gt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise `a > b` on eight 16-bit *unsigned* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_gt(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_gt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Lane-wise `a <= b` on eight 16-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_le(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_le::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise `a <= b` on eight 16-bit *unsigned* integers; each result lane
/// is all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_le(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_le::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Lane-wise `a >= b` on eight 16-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_ge(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ge::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Lane-wise `a >= b` on eight 16-bit *unsigned* integers; each result lane
/// is all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_ge(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_ge::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}
1752
/// Lane-wise equality of two vectors of four 32-bit integers; each result
/// lane is all ones where the input lanes were equal and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_eq(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_eq::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise inequality of two vectors of four 32-bit integers; each result
/// lane is all ones where the input lanes differed and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_ne(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ne::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

// Equality is signedness-agnostic, so the unsigned names are re-exports.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_eq as u32x4_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_ne as u32x4_ne;

/// Lane-wise `a < b` on four 32-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_lt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_lt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise `a < b` on four 32-bit *unsigned* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_lt(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_lt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Lane-wise `a > b` on four 32-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_gt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_gt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise `a > b` on four 32-bit *unsigned* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_gt(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_gt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Lane-wise `a <= b` on four 32-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_le(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_le::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise `a <= b` on four 32-bit *unsigned* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_le(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_le::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Lane-wise `a >= b` on four 32-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_ge(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ge::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Lane-wise `a >= b` on four 32-bit *unsigned* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_ge(a: v128, b: v128) -> v128 {
    // SAFETY: unsigned inputs select an unsigned compare.
    unsafe { simd_ge::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}
1897
/// Lane-wise equality of two vectors of two 64-bit integers; each result
/// lane is all ones where the input lanes were equal and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_eq(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_eq::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise inequality of two vectors of two 64-bit integers; each result
/// lane is all ones where the input lanes differed and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_ne(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ne::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

// Equality is signedness-agnostic, so the unsigned names are re-exports.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_eq as u64x2_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_ne as u64x2_ne;

/// Lane-wise `a < b` on two 64-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_lt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_lt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise `a > b` on two 64-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_gt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_gt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise `a <= b` on two 64-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_le(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_le::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Lane-wise `a >= b` on two 64-bit *signed* integers; each result lane is
/// all ones where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_ge(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ge::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}
1986
/// Lane-wise equality of two vectors of four 32-bit floats; each result lane
/// (an `i32x4` mask) is all ones where the lanes compared equal and all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_eq(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_eq::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise inequality of two vectors of four 32-bit floats; each result
/// lane is all ones where the lanes compared unequal and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ne(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ne::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise `a < b` on four 32-bit floats; each result lane is all ones
/// where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.lt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.lt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_lt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_lt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise `a > b` on four 32-bit floats; each result lane is all ones
/// where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.gt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.gt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_gt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_gt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise `a <= b` on four 32-bit floats; each result lane is all ones
/// where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.le))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.le"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_le(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_le::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Lane-wise `a >= b` on four 32-bit floats; each result lane is all ones
/// where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ge))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ge"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ge(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ge::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}
2070
/// Lane-wise equality of two vectors of two 64-bit floats; each result lane
/// (an `i64x2` mask) is all ones where the lanes compared equal and all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_eq(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_eq::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise inequality of two vectors of two 64-bit floats; each result
/// lane is all ones where the lanes compared unequal and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ne(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ne::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise `a < b` on two 64-bit floats; each result lane is all ones
/// where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.lt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.lt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_lt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_lt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise `a > b` on two 64-bit floats; each result lane is all ones
/// where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.gt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.gt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_gt(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_gt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise `a <= b` on two 64-bit floats; each result lane is all ones
/// where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.le))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.le"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_le(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_le::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise `a >= b` on two 64-bit floats; each result lane is all ones
/// where the comparison holds and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ge))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ge"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ge(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise comparison; no memory access.
    unsafe { simd_ge::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}
2154
/// Flips each bit of the 128-bit input vector (implemented as XOR with an
/// all-ones vector).
#[inline]
#[cfg_attr(test, assert_instr(v128.not))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.not"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_not(a: v128) -> v128 {
    // SAFETY: pure bitwise operation; no memory access.
    unsafe { simd_xor(a.as_i64x2(), simd::i64x2::new(!0, !0)).v128() }
}

/// Performs a bitwise AND of the two input 128-bit vectors.
#[inline]
#[cfg_attr(test, assert_instr(v128.and))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.and"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_and(a: v128, b: v128) -> v128 {
    // SAFETY: pure bitwise operation; no memory access.
    unsafe { simd_and(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Computes `a & !b`: a bitwise AND of `a` with the bitwise negation of `b`.
#[inline]
#[cfg_attr(test, assert_instr(v128.andnot))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.andnot"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_andnot(a: v128, b: v128) -> v128 {
    // SAFETY: pure bitwise operations; no memory access. `-1` here is the
    // all-ones pattern, so the inner XOR is a bitwise NOT of `b`.
    unsafe {
        simd_and(
            a.as_i64x2(),
            simd_xor(b.as_i64x2(), simd::i64x2::new(-1, -1)),
        )
        .v128()
    }
}

/// Performs a bitwise OR of the two input 128-bit vectors.
#[inline]
#[cfg_attr(test, assert_instr(v128.or))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.or"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_or(a: v128, b: v128) -> v128 {
    // SAFETY: pure bitwise operation; no memory access.
    unsafe { simd_or(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Performs a bitwise XOR of the two input 128-bit vectors.
#[inline]
#[cfg_attr(test, assert_instr(v128.xor))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.xor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_xor(a: v128, b: v128) -> v128 {
    // SAFETY: pure bitwise operation; no memory access.
    unsafe { simd_xor(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Per the `v128.bitselect` instruction semantics, selects each bit from
/// `v1` where the corresponding bit of the mask `c` is set, and from `v2`
/// where it is clear.
#[inline]
#[cfg_attr(test, assert_instr(v128.bitselect))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.bitselect"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing `v128.bitselect`.
    unsafe { llvm_bitselect(v1.as_i8x16(), v2.as_i8x16(), c.as_i8x16()).v128() }
}
2225
/// Returns `true` if any bit in the 128-bit input is set, `false` otherwise.
#[inline]
#[cfg_attr(test, assert_instr(v128.any_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.any_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_any_true(a: v128) -> bool {
    // SAFETY: delegates to the LLVM intrinsic implementing `v128.any_true`.
    unsafe { llvm_any_true_i8x16(a.as_i8x16()) != 0 }
}
2235
/// Lane-wise wrapping absolute value of sixteen 8-bit signed integers.
///
/// Negative lanes are replaced by `0 - lane` via compare-and-select; since
/// `simd_sub` wraps, `i8::MIN` maps to itself.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_abs(a: v128) -> v128 {
    // SAFETY: pure lane-wise arithmetic; no memory access.
    unsafe {
        let a = a.as_i8x16();
        let zero = simd::i8x16::ZERO;
        simd_select::<simd::m8x16, simd::i8x16>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
2249
/// Lane-wise wrapping negation of sixteen 8-bit signed integers
/// (multiplication by -1 wraps, so `i8::MIN` negates to itself).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_neg(a: v128) -> v128 {
    // SAFETY: pure lane-wise arithmetic; vector multiply wraps.
    unsafe { simd_mul(a.as_i8x16(), simd::i8x16::splat(-1)).v128() }
}
2259
/// Counts the number of bits set to one within each 8-bit lane.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.popcnt))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.popcnt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_popcnt(v: v128) -> v128 {
    // SAFETY: pure lane-wise bit counting; no memory access.
    unsafe { simd_ctpop(v.as_i8x16()).v128() }
}

/// Population count is signedness-agnostic, so the unsigned name is a
/// re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_popcnt as u8x16_popcnt;
2272
/// Returns `true` if all of the sixteen 8-bit lanes are non-zero, `false`
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_all_true(a: v128) -> bool {
    // SAFETY: delegates to the LLVM intrinsic implementing `i8x16.all_true`.
    unsafe { llvm_i8x16_all_true(a.as_i8x16()) != 0 }
}

/// The test is signedness-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_all_true as u8x16_all_true;
2285
/// Per the `i8x16.bitmask` instruction semantics, extracts the high (sign)
/// bit of each of the sixteen 8-bit lanes and concatenates them into a
/// 16-bit scalar mask (lane 0 in bit 0).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_bitmask(a: v128) -> u16 {
    // SAFETY: delegates to the LLVM intrinsic implementing `i8x16.bitmask`;
    // only the low 16 bits of the i32 result are meaningful.
    unsafe { llvm_bitmask_i8x16(a.as_i8x16()) as u16 }
}

/// The mask extraction is signedness-agnostic, so the unsigned name is a
/// re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_bitmask as u8x16_bitmask;
2299
/// Converts two 16-bit-lane input vectors into a single vector of sixteen
/// 8-bit lanes by narrowing each lane with *signed* saturation (lanes of `a`
/// become the low 8 result lanes, lanes of `b` the high 8).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.narrow_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing the instruction.
    unsafe { llvm_narrow_i8x16_s(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Converts two 16-bit-lane input vectors into a single vector of sixteen
/// 8-bit lanes by narrowing each lane with *unsigned* saturation.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.narrow_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing the instruction.
    unsafe { llvm_narrow_i8x16_u(a.as_i16x8(), b.as_i16x8()).v128() }
}
2327
2328#[inline]
2333#[cfg_attr(test, assert_instr(i8x16.shl))]
2334#[target_feature(enable = "simd128")]
2335#[doc(alias("i8x16.shl"))]
2336#[stable(feature = "wasm_simd", since = "1.54.0")]
2337pub fn i8x16_shl(a: v128, amt: u32) -> v128 {
2338    unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() }
2339}
2340
2341#[stable(feature = "wasm_simd", since = "1.54.0")]
2342pub use i8x16_shl as u8x16_shl;
2343
2344#[inline]
2350#[cfg_attr(test, assert_instr(i8x16.shr_s))]
2351#[target_feature(enable = "simd128")]
2352#[doc(alias("i8x16.shr_s"))]
2353#[stable(feature = "wasm_simd", since = "1.54.0")]
2354pub fn i8x16_shr(a: v128, amt: u32) -> v128 {
2355    unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() }
2356}
2357
2358#[inline]
2364#[cfg_attr(test, assert_instr(i8x16.shr_u))]
2365#[target_feature(enable = "simd128")]
2366#[doc(alias("i8x16.shr_u"))]
2367#[stable(feature = "wasm_simd", since = "1.54.0")]
2368pub fn u8x16_shr(a: v128, amt: u32) -> v128 {
2369    unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat(amt as u8)).v128() }
2370}
2371
/// Lane-wise wrapping addition of two vectors of sixteen 8-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_add(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise arithmetic; vector add wraps.
    unsafe { simd_add(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Wrapping addition is signedness-agnostic, so the unsigned name is a
/// re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_add as u8x16_add;
2384
/// Lane-wise saturating addition of sixteen 8-bit *signed* integers
/// (results clamp to `i8::MIN..=i8::MAX`).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_add_sat(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise saturating arithmetic; no memory access.
    unsafe { simd_saturating_add(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise saturating addition of sixteen 8-bit *unsigned* integers
/// (results clamp to `0..=u8::MAX`).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_add_sat(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise saturating arithmetic; no memory access.
    unsafe { simd_saturating_add(a.as_u8x16(), b.as_u8x16()).v128() }
}
2406
/// Lane-wise wrapping subtraction of two vectors of sixteen 8-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_sub(a: v128, b: v128) -> v128 {
    // SAFETY: pure lane-wise arithmetic; vector subtract wraps.
    unsafe { simd_sub(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Wrapping subtraction is signedness-agnostic, so the unsigned name is a
/// re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_sub as u8x16_sub;
2419
/// Lane-wise saturating subtraction of sixteen 8-bit *signed* integers
/// (results clamp to `i8::MIN..=i8::MAX`).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing the instruction.
    unsafe { llvm_i8x16_sub_sat_s(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Lane-wise saturating subtraction of sixteen 8-bit *unsigned* integers
/// (results clamp to `0..=u8::MAX`).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing the instruction.
    unsafe { llvm_i8x16_sub_sat_u(a.as_i8x16(), b.as_i8x16()).v128() }
}
2441
/// Lane-wise minimum of sixteen 8-bit *signed* integers, computed via
/// compare-and-select.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_min(a: v128, b: v128) -> v128 {
    let a = a.as_i8x16();
    let b = b.as_i8x16();
    // SAFETY: pure lane-wise compare-and-select; no memory access.
    unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
}

/// Lane-wise minimum of sixteen 8-bit *unsigned* integers, computed via
/// compare-and-select.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_min(a: v128, b: v128) -> v128 {
    let a = a.as_u8x16();
    let b = b.as_u8x16();
    // SAFETY: pure lane-wise compare-and-select; unsigned inputs select an
    // unsigned compare.
    unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
}

/// Lane-wise maximum of sixteen 8-bit *signed* integers, computed via
/// compare-and-select.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_max(a: v128, b: v128) -> v128 {
    let a = a.as_i8x16();
    let b = b.as_i8x16();
    // SAFETY: pure lane-wise compare-and-select; no memory access.
    unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
}

/// Lane-wise maximum of sixteen 8-bit *unsigned* integers, computed via
/// compare-and-select.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_max(a: v128, b: v128) -> v128 {
    let a = a.as_u8x16();
    let b = b.as_u8x16();
    // SAFETY: pure lane-wise compare-and-select; unsigned inputs select an
    // unsigned compare.
    unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
}
2493
/// Per the `i8x16.avgr_u` instruction semantics, computes the lane-wise
/// rounding average of two vectors of sixteen unsigned 8-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.avgr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.avgr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_avgr(a: v128, b: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing the instruction;
    // the lane bit patterns are merely reinterpreted as `i8x16`.
    unsafe { llvm_avgr_u_i8x16(a.as_i8x16(), b.as_i8x16()).v128() }
}
2503
/// Per the `i16x8.extadd_pairwise_i8x16_s` instruction semantics, adds each
/// adjacent pair of *signed* 8-bit lanes, producing eight 16-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extadd_pairwise_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing the instruction.
    unsafe { llvm_i16x8_extadd_pairwise_i8x16_s(a.as_i8x16()).v128() }
}

/// Per the `i16x8.extadd_pairwise_i8x16_u` instruction semantics, adds each
/// adjacent pair of *unsigned* 8-bit lanes, producing eight 16-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extadd_pairwise_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extadd_pairwise_u8x16(a: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing the instruction.
    unsafe { llvm_i16x8_extadd_pairwise_i8x16_u(a.as_i8x16()).v128() }
}

/// Alternate naming for the unsigned pairwise extending addition above.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16;
2528
/// Lane-wise wrapping absolute value of eight 16-bit signed integers.
///
/// Negative lanes are replaced by `0 - lane` via compare-and-select; since
/// `simd_sub` wraps, `i16::MIN` maps to itself.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_abs(a: v128) -> v128 {
    let a = a.as_i16x8();
    let zero = simd::i16x8::ZERO;
    // SAFETY: pure lane-wise arithmetic; no memory access.
    unsafe {
        simd_select::<simd::m16x8, simd::i16x8>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
2542
/// Lane-wise wrapping negation of eight 16-bit signed integers
/// (multiplication by -1 wraps, so `i16::MIN` negates to itself).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_neg(a: v128) -> v128 {
    // SAFETY: pure lane-wise arithmetic; vector multiply wraps.
    unsafe { simd_mul(a.as_i16x8(), simd::i16x8::splat(-1)).v128() }
}
2552
/// Per the `i16x8.q15mulr_sat_s` instruction semantics, performs a
/// lane-wise saturating, rounding Q15-format multiplication of eight 16-bit
/// signed integers.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.q15mulr_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.q15mulr_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing the instruction.
    unsafe { llvm_q15mulr(a.as_i16x8(), b.as_i16x8()).v128() }
}
2562
/// Returns `true` if all of the eight 16-bit lanes are non-zero, `false`
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_all_true(a: v128) -> bool {
    // SAFETY: delegates to the LLVM intrinsic implementing `i16x8.all_true`.
    unsafe { llvm_i16x8_all_true(a.as_i16x8()) != 0 }
}

/// The test is signedness-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_all_true as u16x8_all_true;
2575
/// Per the `i16x8.bitmask` instruction semantics, extracts the high (sign)
/// bit of each of the eight 16-bit lanes and concatenates them into an
/// 8-bit scalar mask (lane 0 in bit 0).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_bitmask(a: v128) -> u8 {
    // SAFETY: delegates to the LLVM intrinsic implementing `i16x8.bitmask`;
    // only the low 8 bits of the i32 result are meaningful.
    unsafe { llvm_bitmask_i16x8(a.as_i16x8()) as u8 }
}

/// The mask extraction is signedness-agnostic, so the unsigned name is a
/// re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_bitmask as u16x8_bitmask;
2589
/// Converts two 32-bit-lane input vectors into a single vector of eight
/// 16-bit lanes by narrowing each lane with *signed* saturation (lanes of
/// `a` become the low 4 result lanes, lanes of `b` the high 4).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.narrow_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing the instruction.
    unsafe { llvm_narrow_i16x8_s(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Converts two 32-bit-lane input vectors into a single vector of eight
/// 16-bit lanes by narrowing each lane with *unsigned* saturation.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.narrow_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
    // SAFETY: delegates to the LLVM intrinsic implementing the instruction.
    unsafe { llvm_narrow_i16x8_u(a.as_i32x4(), b.as_i32x4()).v128() }
}
2617
/// Sign-extends the low eight 8-bit lanes of `a` to eight 16-bit lanes
/// (`i16x8.extend_low_i8x16_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_low_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..8, then widen i8 -> i16 with a sign-extending cast.
        simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ))
        .v128()
    }
}
2635
/// Sign-extends the high eight 8-bit lanes of `a` to eight 16-bit lanes
/// (`i16x8.extend_high_i8x16_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_high_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
    unsafe {
        // Select lanes 8..16, then widen i8 -> i16 with a sign-extending cast.
        simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ))
        .v128()
    }
}
2653
/// Zero-extends the low eight 8-bit lanes of `a` to eight 16-bit lanes
/// (`i16x8.extend_low_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_low_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..8, then widen u8 -> u16 with a zero-extending cast.
        simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;
2674
/// Zero-extends the high eight 8-bit lanes of `a` to eight 16-bit lanes
/// (`i16x8.extend_high_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_high_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
    unsafe {
        // Select lanes 8..16, then widen u8 -> u16 with a zero-extending cast.
        simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
2695
2696#[inline]
2701#[cfg_attr(test, assert_instr(i16x8.shl))]
2702#[target_feature(enable = "simd128")]
2703#[doc(alias("i16x8.shl"))]
2704#[stable(feature = "wasm_simd", since = "1.54.0")]
2705pub fn i16x8_shl(a: v128, amt: u32) -> v128 {
2706    unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
2707}
2708
2709#[stable(feature = "wasm_simd", since = "1.54.0")]
2710pub use i16x8_shl as u16x8_shl;
2711
2712#[inline]
2718#[cfg_attr(test, assert_instr(i16x8.shr_s))]
2719#[target_feature(enable = "simd128")]
2720#[doc(alias("i16x8.shr_s"))]
2721#[stable(feature = "wasm_simd", since = "1.54.0")]
2722pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
2723    unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
2724}
2725
2726#[inline]
2732#[cfg_attr(test, assert_instr(i16x8.shr_u))]
2733#[target_feature(enable = "simd128")]
2734#[doc(alias("i16x8.shr_u"))]
2735#[stable(feature = "wasm_simd", since = "1.54.0")]
2736pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
2737    unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat(amt as u16)).v128() }
2738}
2739
/// Lane-wise wrapping addition of two vectors of eight 16-bit integers
/// (`i16x8.add`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i16x8(), b.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_add as u16x8_add;
2752
/// Lane-wise signed saturating addition of two vectors of eight 16-bit
/// integers (`i16x8.add_sat_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_i16x8(), b.as_i16x8()).v128() }
}
2763
/// Lane-wise unsigned saturating addition of two vectors of eight 16-bit
/// integers (`i16x8.add_sat_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_u16x8(), b.as_u16x8()).v128() }
}
2774
/// Lane-wise wrapping subtraction of two vectors of eight 16-bit integers
/// (`i16x8.sub`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i16x8(), b.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_sub as u16x8_sub;
2787
/// Lane-wise signed saturating subtraction of two vectors of eight 16-bit
/// integers (`i16x8.sub_sat_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { llvm_i16x8_sub_sat_s(a.as_i16x8(), b.as_i16x8()).v128() }
}
2798
/// Lane-wise unsigned saturating subtraction of two vectors of eight 16-bit
/// integers (`i16x8.sub_sat_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { llvm_i16x8_sub_sat_u(a.as_i16x8(), b.as_i16x8()).v128() }
}
2809
/// Lane-wise wrapping multiplication of two vectors of eight 16-bit integers
/// (`i16x8.mul`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i16x8(), b.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_mul as u16x8_mul;
2823
/// Lane-wise minimum of two vectors of eight 16-bit signed integers
/// (`i16x8.min_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_min(a: v128, b: v128) -> v128 {
    let a = a.as_i16x8();
    let b = b.as_i16x8();
    // Per-lane select: where a < b pick a, otherwise b.
    unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
}
2836
/// Lane-wise minimum of two vectors of eight 16-bit unsigned integers
/// (`i16x8.min_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_min(a: v128, b: v128) -> v128 {
    let a = a.as_u16x8();
    let b = b.as_u16x8();
    // Per-lane select: where a < b (unsigned compare) pick a, otherwise b.
    unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
}
2849
/// Lane-wise maximum of two vectors of eight 16-bit signed integers
/// (`i16x8.max_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_max(a: v128, b: v128) -> v128 {
    let a = a.as_i16x8();
    let b = b.as_i16x8();
    // Per-lane select: where a > b pick a, otherwise b.
    unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
}
2862
/// Lane-wise maximum of two vectors of eight 16-bit unsigned integers
/// (`i16x8.max_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_max(a: v128, b: v128) -> v128 {
    let a = a.as_u16x8();
    let b = b.as_u16x8();
    // Per-lane select: where a > b (unsigned compare) pick a, otherwise b.
    unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
}
2875
/// Lane-wise rounding average of two vectors of eight 16-bit unsigned
/// integers (`i16x8.avgr_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.avgr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.avgr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
    unsafe { llvm_avgr_u_i16x8(a.as_i16x8(), b.as_i16x8()).v128() }
}
2885
/// Lane-wise multiplication of the low eight 8-bit lanes of `a` and `b`,
/// sign-extended to 16 bits before multiplying, producing the full 16-bit
/// results (`i16x8.extmul_low_i8x16_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_low_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Sign-extend lanes 0..8 of each operand, then multiply at 16 bits.
        let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            b.as_i8x16(),
            b.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2910
/// Lane-wise multiplication of the high eight 8-bit lanes of `a` and `b`,
/// sign-extended to 16 bits before multiplying, producing the full 16-bit
/// results (`i16x8.extmul_high_i8x16_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_high_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Sign-extend lanes 8..16 of each operand, then multiply at 16 bits.
        let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            b.as_i8x16(),
            b.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2935
/// Lane-wise multiplication of the low eight 8-bit lanes of `a` and `b`,
/// zero-extended to 16 bits before multiplying, producing the full 16-bit
/// results (`i16x8.extmul_low_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_low_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Zero-extend lanes 0..8 of each operand, then multiply at 16 bits.
        let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            b.as_u8x16(),
            b.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
2963
/// Lane-wise multiplication of the high eight 8-bit lanes of `a` and `b`,
/// zero-extended to 16 bits before multiplying, producing the full 16-bit
/// results (`i16x8.extmul_high_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_high_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Zero-extend lanes 8..16 of each operand, then multiply at 16 bits.
        let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            b.as_u8x16(),
            b.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16;
2991
/// Extended pairwise addition: adds adjacent pairs of the 16-bit lanes of
/// `a`, sign-extended to 32-bit results (`i32x4.extadd_pairwise_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extadd_pairwise_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 {
    unsafe { llvm_i32x4_extadd_pairwise_i16x8_s(a.as_i16x8()).v128() }
}
3002
/// Extended pairwise addition: adds adjacent pairs of the 16-bit lanes of
/// `a`, zero-extended to 32-bit results (`i32x4.extadd_pairwise_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_u))]
#[doc(alias("i32x4.extadd_pairwise_i16x8_u"))]
#[target_feature(enable = "simd128")]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extadd_pairwise_u16x8(a: v128) -> v128 {
    unsafe { llvm_i32x4_extadd_pairwise_i16x8_u(a.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8;
3016
/// Lane-wise wrapping absolute value of a vector of four 32-bit signed
/// integers (`i32x4.abs`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_abs(a: v128) -> v128 {
    let a = a.as_i32x4();
    let zero = simd::i32x4::ZERO;
    unsafe {
        // For negative lanes select `0 - lane`, otherwise keep the lane.
        simd_select::<simd::m32x4, simd::i32x4>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
3030
/// Negates each lane of a vector of four 32-bit integers (`i32x4.neg`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_neg(a: v128) -> v128 {
    // Multiplication by -1 lowers to the single wasm `i32x4.neg` instruction.
    unsafe { simd_mul(a.as_i32x4(), simd::i32x4::splat(-1)).v128() }
}
3040
/// Returns `true` if all 32-bit lanes of `a` are non-zero, `false` otherwise
/// (`i32x4.all_true`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_all_true(a: v128) -> bool {
    unsafe { llvm_i32x4_all_true(a.as_i32x4()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_all_true as u32x4_all_true;
3053
/// Extracts the high (sign) bit of each of the four 32-bit lanes of `a` and
/// packs them into the low 4 bits of the returned scalar (`i32x4.bitmask`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_bitmask(a: v128) -> u8 {
    unsafe { llvm_bitmask_i32x4(a.as_i32x4()) as u8 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_bitmask as u32x4_bitmask;
3067
/// Sign-extends the low four 16-bit lanes of `a` to four 32-bit lanes
/// (`i32x4.extend_low_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_low_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..4, then widen i16 -> i32 with a sign-extending cast.
        simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
3085
/// Sign-extends the high four 16-bit lanes of `a` to four 32-bit lanes
/// (`i32x4.extend_high_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_high_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 4..8, then widen i16 -> i32 with a sign-extending cast.
        simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [4, 5, 6, 7]
        ))
        .v128()
    }
}
3103
/// Zero-extends the low four 16-bit lanes of `a` to four 32-bit lanes
/// (`i32x4.extend_low_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_low_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..4, then widen u16 -> u32 with a zero-extending cast.
        simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [0, 1, 2, 3]
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;
3124
/// Zero-extends the high four 16-bit lanes of `a` to four 32-bit lanes
/// (`i32x4.extend_high_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_high_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 4..8, then widen u16 -> u32 with a zero-extending cast.
        simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [4, 5, 6, 7]
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;
3145
3146#[inline]
3151#[cfg_attr(test, assert_instr(i32x4.shl))]
3152#[target_feature(enable = "simd128")]
3153#[doc(alias("i32x4.shl"))]
3154#[stable(feature = "wasm_simd", since = "1.54.0")]
3155pub fn i32x4_shl(a: v128, amt: u32) -> v128 {
3156    unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
3157}
3158
3159#[stable(feature = "wasm_simd", since = "1.54.0")]
3160pub use i32x4_shl as u32x4_shl;
3161
3162#[inline]
3168#[cfg_attr(test, assert_instr(i32x4.shr_s))]
3169#[target_feature(enable = "simd128")]
3170#[doc(alias("i32x4.shr_s"))]
3171#[stable(feature = "wasm_simd", since = "1.54.0")]
3172pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
3173    unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
3174}
3175
3176#[inline]
3182#[cfg_attr(test, assert_instr(i32x4.shr_u))]
3183#[target_feature(enable = "simd128")]
3184#[doc(alias("i32x4.shr_u"))]
3185#[stable(feature = "wasm_simd", since = "1.54.0")]
3186pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
3187    unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt)).v128() }
3188}
3189
/// Lane-wise wrapping addition of two vectors of four 32-bit integers
/// (`i32x4.add`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i32x4(), b.as_i32x4()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_add as u32x4_add;
3202
/// Lane-wise wrapping subtraction of two vectors of four 32-bit integers
/// (`i32x4.sub`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i32x4(), b.as_i32x4()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_sub as u32x4_sub;
3215
/// Lane-wise wrapping multiplication of two vectors of four 32-bit integers
/// (`i32x4.mul`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i32x4(), b.as_i32x4()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_mul as u32x4_mul;
3229
/// Lane-wise minimum of two vectors of four 32-bit signed integers
/// (`i32x4.min_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_min(a: v128, b: v128) -> v128 {
    let a = a.as_i32x4();
    let b = b.as_i32x4();
    // Per-lane select: where a < b pick a, otherwise b.
    unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
}
3242
/// Lane-wise minimum of two vectors of four 32-bit unsigned integers
/// (`i32x4.min_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_min(a: v128, b: v128) -> v128 {
    let a = a.as_u32x4();
    let b = b.as_u32x4();
    // Per-lane select: where a < b (unsigned compare) pick a, otherwise b.
    unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
}
3255
/// Lane-wise maximum of two vectors of four 32-bit signed integers
/// (`i32x4.max_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_max(a: v128, b: v128) -> v128 {
    let a = a.as_i32x4();
    let b = b.as_i32x4();
    // Per-lane select: where a > b pick a, otherwise b.
    unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
}
3268
/// Lane-wise maximum of two vectors of four 32-bit unsigned integers
/// (`i32x4.max_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_max(a: v128, b: v128) -> v128 {
    let a = a.as_u32x4();
    let b = b.as_u32x4();
    // Per-lane select: where a > b (unsigned compare) pick a, otherwise b.
    unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
}
3281
/// Multiplies corresponding signed 16-bit lanes of `a` and `b` and adds
/// adjacent pairs of the full 32-bit products (`i32x4.dot_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.dot_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.dot_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_i32x4_dot_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
}
3292
/// Lane-wise multiplication of the low four 16-bit lanes of `a` and `b`,
/// sign-extended to 32 bits before multiplying, producing the full 32-bit
/// results (`i32x4.extmul_low_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_low_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Sign-extend lanes 0..4 of each operand, then multiply at 32 bits.
        let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [0, 1, 2, 3]
        ));
        let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            b.as_i16x8(),
            b.as_i16x8(),
            [0, 1, 2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3317
/// Lane-wise multiplication of the high four 16-bit lanes of `a` and `b`,
/// sign-extended to 32 bits before multiplying, producing the full 32-bit
/// results (`i32x4.extmul_high_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_high_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Sign-extend lanes 4..8 of each operand, then multiply at 32 bits.
        let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [4, 5, 6, 7]
        ));
        let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            b.as_i16x8(),
            b.as_i16x8(),
            [4, 5, 6, 7]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3342
/// Lane-wise multiplication of the low four 16-bit lanes of `a` and `b`,
/// zero-extended to 32 bits before multiplying, producing the full 32-bit
/// results (`i32x4.extmul_low_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_low_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Zero-extend lanes 0..4 of each operand, then multiply at 32 bits.
        let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [0, 1, 2, 3]
        ));
        let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            b.as_u16x8(),
            b.as_u16x8(),
            [0, 1, 2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;
3370
/// Lane-wise multiplication of the high four 16-bit lanes of `a` and `b`,
/// zero-extended to 32 bits before multiplying, producing the full 32-bit
/// results (`i32x4.extmul_high_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_high_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Zero-extend lanes 4..8 of each operand, then multiply at 32 bits.
        let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [4, 5, 6, 7]
        ));
        let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            b.as_u16x8(),
            b.as_u16x8(),
            [4, 5, 6, 7]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;
3398
/// Lane-wise wrapping absolute value of a vector of two 64-bit signed
/// integers (`i64x2.abs`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_abs(a: v128) -> v128 {
    let a = a.as_i64x2();
    let zero = simd::i64x2::ZERO;
    unsafe {
        // For negative lanes select `0 - lane`, otherwise keep the lane.
        simd_select::<simd::m64x2, simd::i64x2>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
3412
/// Negates each lane of a vector of two 64-bit integers (`i64x2.neg`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_neg(a: v128) -> v128 {
    // Multiplication by -1 lowers to the single wasm `i64x2.neg` instruction.
    unsafe { simd_mul(a.as_i64x2(), simd::i64x2::splat(-1)).v128() }
}
3422
/// Returns `true` if both 64-bit lanes of `a` are non-zero, `false` otherwise
/// (`i64x2.all_true`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_all_true(a: v128) -> bool {
    unsafe { llvm_i64x2_all_true(a.as_i64x2()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_all_true as u64x2_all_true;
3435
/// Extracts the high (sign) bit of each of the two 64-bit lanes of `a` and
/// packs them into the low 2 bits of the returned scalar (`i64x2.bitmask`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_bitmask(a: v128) -> u8 {
    unsafe { llvm_bitmask_i64x2(a.as_i64x2()) as u8 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_bitmask as u64x2_bitmask;
3449
/// Sign-extends the low two 32-bit lanes of `a` to two 64-bit lanes
/// (`i64x2.extend_low_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..2, then widen i32 -> i64 with a sign-extending cast.
        simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1]))
            .v128()
    }
}
3463
/// Sign-extends the high two 32-bit lanes of `a` to two 64-bit lanes
/// (`i64x2.extend_high_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_high_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 2..4, then widen i32 -> i64 with a sign-extending cast.
        simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [2, 3]))
            .v128()
    }
}
3477
/// Zero-extends the low two 32-bit lanes of `a` to two 64-bit lanes
/// (`i64x2.extend_low_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..2; casting from u32x2 zero-extends even though the
        // destination lane type is signed i64x2.
        simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1]))
            .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;
3494
/// Zero-extends the high two 32-bit lanes of `a` to two 64-bit lanes
/// (`i64x2.extend_high_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_high_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 2..4; casting from u32x2 zero-extends even though the
        // destination lane type is signed i64x2.
        simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [2, 3]))
            .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;
3511
3512#[inline]
3517#[cfg_attr(test, assert_instr(i64x2.shl))]
3518#[target_feature(enable = "simd128")]
3519#[doc(alias("i64x2.shl"))]
3520#[stable(feature = "wasm_simd", since = "1.54.0")]
3521pub fn i64x2_shl(a: v128, amt: u32) -> v128 {
3522    unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
3523}
3524
3525#[stable(feature = "wasm_simd", since = "1.54.0")]
3526pub use i64x2_shl as u64x2_shl;
3527
3528#[inline]
3534#[cfg_attr(test, assert_instr(i64x2.shr_s))]
3535#[target_feature(enable = "simd128")]
3536#[doc(alias("i64x2.shr_s"))]
3537#[stable(feature = "wasm_simd", since = "1.54.0")]
3538pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
3539    unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
3540}
3541
3542#[inline]
3548#[cfg_attr(test, assert_instr(i64x2.shr_u))]
3549#[target_feature(enable = "simd128")]
3550#[doc(alias("i64x2.shr_u"))]
3551#[stable(feature = "wasm_simd", since = "1.54.0")]
3552pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
3553    unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat(amt as u64)).v128() }
3554}
3555
/// Lane-wise wrapping addition of two vectors of two 64-bit integers
/// (`i64x2.add`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i64x2(), b.as_i64x2()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_add as u64x2_add;
3568
/// Lane-wise wrapping subtraction of two vectors of two 64-bit integers
/// (`i64x2.sub`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i64x2(), b.as_i64x2()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_sub as u64x2_sub;
3581
/// Lane-wise wrapping multiplication of two vectors of two 64-bit integers
/// (`i64x2.mul`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i64x2(), b.as_i64x2()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_mul as u64x2_mul;
3594
/// Lane-wise multiplication of the low two 32-bit lanes of `a` and `b`,
/// sign-extended to 64 bits before multiplying, producing the full 64-bit
/// results (`i64x2.extmul_low_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Sign-extend lanes 0..2 of each operand, then multiply at 64 bits.
        let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            a.as_i32x4(),
            a.as_i32x4(),
            [0, 1]
        ));
        let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            b.as_i32x4(),
            b.as_i32x4(),
            [0, 1]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3619
/// Lane-wise multiplication of the high two 32-bit lanes of `a` and `b`,
/// sign-extended to 64 bits before multiplying, producing the full 64-bit
/// results (`i64x2.extmul_high_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_high_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Sign-extend lanes 2..4 of each operand, then multiply at 64 bits.
        let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            a.as_i32x4(),
            a.as_i32x4(),
            [2, 3]
        ));
        let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            b.as_i32x4(),
            b.as_i32x4(),
            [2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3644
/// Lane-wise multiplication of the low two 32-bit lanes of `a` and `b`,
/// zero-extended to 64 bits before multiplying, producing the full 64-bit
/// results (`i64x2.extmul_low_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Zero-extend lanes 0..2 of each operand, then multiply at 64 bits.
        let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            a.as_u32x4(),
            a.as_u32x4(),
            [0, 1]
        ));
        let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            b.as_u32x4(),
            b.as_u32x4(),
            [0, 1]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;
3672
/// Lane-wise multiplication of the high two 32-bit lanes of `a` and `b`,
/// zero-extended to 64 bits before multiplying, producing the full 64-bit
/// results (`i64x2.extmul_high_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_high_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Zero-extend lanes 2..4 of each operand, then multiply at 64 bits.
        let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            a.as_u32x4(),
            a.as_u32x4(),
            [2, 3]
        ));
        let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            b.as_u32x4(),
            b.as_u32x4(),
            [2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
3700
/// Lane-wise rounding to the nearest integral value not smaller than the input.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ceil))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ceil"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ceil(a: v128) -> v128 {
    unsafe { simd_ceil(a.as_f32x4()).v128() }
}
3710
/// Lane-wise rounding to the nearest integral value not greater than the input.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.floor))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.floor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_floor(a: v128) -> v128 {
    unsafe { simd_floor(a.as_f32x4()).v128() }
}
3720
/// Lane-wise rounding to the nearest integral value with the magnitude not
/// larger than the input (rounding toward zero).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.trunc))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.trunc"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_trunc(a: v128) -> v128 {
    unsafe { simd_trunc(a.as_f32x4()).v128() }
}
3731
/// Lane-wise rounding to the nearest integral value; if two values are equally
/// near, rounds to the even one.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.nearest))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.nearest"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_nearest(a: v128) -> v128 {
    // Round-half-to-even has no portable `simd_*` intrinsic, so this goes
    // through the wasm-specific LLVM intrinsic.
    unsafe { llvm_f32x4_nearest(a.as_f32x4()).v128() }
}
3742
/// Calculates the absolute value of each lane of a 128-bit vector interpreted
/// as four 32-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_abs(a: v128) -> v128 {
    unsafe { simd_fabs(a.as_f32x4()).v128() }
}
3753
/// Negates each lane of a 128-bit vector interpreted as four 32-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_neg(a: v128) -> v128 {
    unsafe { simd_neg(a.as_f32x4()).v128() }
}
3764
/// Calculates the square root of each lane of a 128-bit vector interpreted as
/// four 32-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.sqrt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.sqrt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_sqrt(a: v128) -> v128 {
    unsafe { simd_fsqrt(a.as_f32x4()).v128() }
}
3775
/// Lane-wise addition of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_f32x4(), b.as_f32x4()).v128() }
}
3786
/// Lane-wise subtraction of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_f32x4(), b.as_f32x4()).v128() }
}
3797
/// Lane-wise multiplication of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_f32x4(), b.as_f32x4()).v128() }
}
3808
/// Lane-wise division of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.div))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.div"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_div(a: v128, b: v128) -> v128 {
    unsafe { simd_div(a.as_f32x4(), b.as_f32x4()).v128() }
}
3819
/// Calculates the lane-wise minimum of two 128-bit vectors interpreted as four
/// 32-bit floating point numbers, with the semantics of the WebAssembly
/// `f32x4.min` instruction.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.min))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.min"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_min(a: v128, b: v128) -> v128 {
    // Goes through the wasm-specific LLVM intrinsic so the instruction's exact
    // NaN/zero semantics are preserved.
    unsafe { llvm_f32x4_min(a.as_f32x4(), b.as_f32x4()).v128() }
}
3830
/// Calculates the lane-wise maximum of two 128-bit vectors interpreted as four
/// 32-bit floating point numbers, with the semantics of the WebAssembly
/// `f32x4.max` instruction.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.max))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.max"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_max(a: v128, b: v128) -> v128 {
    // Goes through the wasm-specific LLVM intrinsic so the instruction's exact
    // NaN/zero semantics are preserved.
    unsafe { llvm_f32x4_max(a.as_f32x4(), b.as_f32x4()).v128() }
}
3841
/// Lane-wise pseudo-minimum of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers, defined as `b < a ? b : a` for each lane.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.pmin))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.pmin"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmin(a: v128, b: v128) -> v128 {
    unsafe {
        // Exactly `b < a ? b : a`; note the argument order mirrors the spec's
        // definition, which is what `assert_instr` pattern-matches.
        simd_select::<simd::m32x4, simd::f32x4>(
            simd_lt(b.as_f32x4(), a.as_f32x4()),
            b.as_f32x4(),
            a.as_f32x4(),
        )
        .v128()
    }
}
3858
/// Lane-wise pseudo-maximum of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers, defined as `a < b ? b : a` for each lane.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.pmax))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.pmax"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
    unsafe {
        // Exactly `a < b ? b : a`; note the argument order mirrors the spec's
        // definition, which is what `assert_instr` pattern-matches.
        simd_select::<simd::m32x4, simd::f32x4>(
            simd_lt(a.as_f32x4(), b.as_f32x4()),
            b.as_f32x4(),
            a.as_f32x4(),
        )
        .v128()
    }
}
3875
/// Lane-wise rounding to the nearest integral value not smaller than the input.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ceil))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ceil"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ceil(a: v128) -> v128 {
    unsafe { simd_ceil(a.as_f64x2()).v128() }
}
3885
/// Lane-wise rounding to the nearest integral value not greater than the input.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.floor))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.floor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_floor(a: v128) -> v128 {
    unsafe { simd_floor(a.as_f64x2()).v128() }
}
3895
/// Lane-wise rounding to the nearest integral value with the magnitude not
/// larger than the input (rounding toward zero).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.trunc))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.trunc"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_trunc(a: v128) -> v128 {
    unsafe { simd_trunc(a.as_f64x2()).v128() }
}
3906
/// Lane-wise rounding to the nearest integral value; if two values are equally
/// near, rounds to the even one.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.nearest))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.nearest"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_nearest(a: v128) -> v128 {
    // Round-half-to-even has no portable `simd_*` intrinsic, so this goes
    // through the wasm-specific LLVM intrinsic.
    unsafe { llvm_f64x2_nearest(a.as_f64x2()).v128() }
}
3917
/// Calculates the absolute value of each lane of a 128-bit vector interpreted
/// as two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_abs(a: v128) -> v128 {
    unsafe { simd_fabs(a.as_f64x2()).v128() }
}
3928
/// Negates each lane of a 128-bit vector interpreted as two 64-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_neg(a: v128) -> v128 {
    unsafe { simd_neg(a.as_f64x2()).v128() }
}
3939
/// Calculates the square root of each lane of a 128-bit vector interpreted as
/// two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.sqrt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.sqrt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sqrt(a: v128) -> v128 {
    unsafe { simd_fsqrt(a.as_f64x2()).v128() }
}
3950
/// Lane-wise addition of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_f64x2(), b.as_f64x2()).v128() }
}
3961
/// Lane-wise subtraction of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_f64x2(), b.as_f64x2()).v128() }
}
3972
/// Lane-wise multiplication of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_f64x2(), b.as_f64x2()).v128() }
}
3983
/// Lane-wise division of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.div))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.div"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_div(a: v128, b: v128) -> v128 {
    unsafe { simd_div(a.as_f64x2(), b.as_f64x2()).v128() }
}
3994
/// Calculates the lane-wise minimum of two 128-bit vectors interpreted as two
/// 64-bit floating point numbers, with the semantics of the WebAssembly
/// `f64x2.min` instruction.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.min))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.min"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_min(a: v128, b: v128) -> v128 {
    // Goes through the wasm-specific LLVM intrinsic so the instruction's exact
    // NaN/zero semantics are preserved.
    unsafe { llvm_f64x2_min(a.as_f64x2(), b.as_f64x2()).v128() }
}
4005
/// Calculates the lane-wise maximum of two 128-bit vectors interpreted as two
/// 64-bit floating point numbers, with the semantics of the WebAssembly
/// `f64x2.max` instruction.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.max))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.max"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_max(a: v128, b: v128) -> v128 {
    // Goes through the wasm-specific LLVM intrinsic so the instruction's exact
    // NaN/zero semantics are preserved.
    unsafe { llvm_f64x2_max(a.as_f64x2(), b.as_f64x2()).v128() }
}
4016
/// Lane-wise pseudo-minimum of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers, defined as `b < a ? b : a` for each lane.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.pmin))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.pmin"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmin(a: v128, b: v128) -> v128 {
    unsafe {
        // Exactly `b < a ? b : a`; note the argument order mirrors the spec's
        // definition, which is what `assert_instr` pattern-matches.
        simd_select::<simd::m64x2, simd::f64x2>(
            simd_lt(b.as_f64x2(), a.as_f64x2()),
            b.as_f64x2(),
            a.as_f64x2(),
        )
        .v128()
    }
}
4033
/// Lane-wise pseudo-maximum of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers, defined as `a < b ? b : a` for each lane.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.pmax))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.pmax"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmax(a: v128, b: v128) -> v128 {
    unsafe {
        // Exactly `a < b ? b : a`; note the argument order mirrors the spec's
        // definition, which is what `assert_instr` pattern-matches.
        simd_select::<simd::m64x2, simd::f64x2>(
            simd_lt(a.as_f64x2(), b.as_f64x2()),
            b.as_f64x2(),
            a.as_f64x2(),
        )
        .v128()
    }
}
4050
/// Converts a 128-bit vector interpreted as four 32-bit floating point numbers
/// into a 128-bit vector of four 32-bit signed integers, saturating: values
/// out of range clamp to `i32::MIN`/`i32::MAX` and NaN becomes 0 (per the
/// `i32x4.trunc_sat_f32x4_s` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 {
    unsafe { llvm_i32x4_trunc_sat_f32x4_s(a.as_f32x4()).v128() }
}
4064
/// Converts a 128-bit vector interpreted as four 32-bit floating point numbers
/// into a 128-bit vector of four 32-bit unsigned integers, saturating: values
/// out of range clamp to `0`/`u32::MAX` and NaN becomes 0 (per the
/// `i32x4.trunc_sat_f32x4_u` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 {
    unsafe { llvm_i32x4_trunc_sat_f32x4_u(a.as_f32x4()).v128() }
}
4078
/// Converts a 128-bit vector interpreted as four 32-bit signed integers into a
/// 128-bit vector of four 32-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.convert_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.convert_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_i32x4(a: v128) -> v128 {
    unsafe { simd_cast::<_, simd::f32x4>(a.as_i32x4()).v128() }
}
4089
/// Converts a 128-bit vector interpreted as four 32-bit unsigned integers into
/// a 128-bit vector of four 32-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.convert_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.convert_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_u32x4(a: v128) -> v128 {
    unsafe { simd_cast::<_, simd::f32x4>(a.as_u32x4()).v128() }
}
4100
/// Saturating conversion of the two 64-bit float lanes to the two lower 32-bit
/// signed integer lanes of the result; the two upper lanes are zero.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_s_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f64x2_s_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
    let ret: simd::i32x4 = unsafe {
        // Saturating-truncate into an i32x2, then widen to i32x4 by taking the
        // two converted lanes followed by two zero lanes.
        simd_shuffle!(
            llvm_i32x2_trunc_sat_f64x2_s(a.as_f64x2()),
            simd::i32x2::ZERO,
            [0, 1, 2, 3],
        )
    };
    ret.v128()
}
4124
/// Saturating conversion of the two 64-bit float lanes to the two lower 32-bit
/// unsigned integer lanes of the result; the two upper lanes are zero.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_u_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f64x2_u_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
    let ret: simd::i32x4 = unsafe {
        // Saturating-truncate into an i32x2, then widen to i32x4 by taking the
        // two converted lanes followed by two zero lanes.
        simd_shuffle!(
            llvm_i32x2_trunc_sat_f64x2_u(a.as_f64x2()),
            simd::i32x2::ZERO,
            [0, 1, 2, 3],
        )
    };
    ret.v128()
}
4148
/// Lane-wise conversion of the two lower signed 32-bit integer lanes to two
/// 64-bit floating point lanes.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.convert_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
    unsafe {
        // Select the low two i32 lanes, then convert each to f64 (exact: every
        // i32 is representable as an f64).
        simd_cast::<simd::i32x2, simd::f64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1],))
            .v128()
    }
}
4161
/// Lane-wise conversion of the two lower unsigned 32-bit integer lanes to two
/// 64-bit floating point lanes.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.convert_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
    unsafe {
        // Select the low two u32 lanes, then convert each to f64 (exact: every
        // u32 is representable as an f64).
        simd_cast::<simd::u32x2, simd::f64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1],))
            .v128()
    }
}
4174
/// Conversion of the two 64-bit float lanes to the two lower 32-bit float
/// lanes of the result; the two upper lanes are zero.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.demote_f64x2_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.demote_f64x2_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
    unsafe {
        // Widen to f64x4 with two zero lanes appended, then demote every lane
        // to f32 in one cast.
        simd_cast::<simd::f64x4, simd::f32x4>(simd_shuffle!(
            a.as_f64x2(),
            simd::f64x2::ZERO,
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
4195
4196#[inline]
4199#[cfg_attr(test, assert_instr(f64x2.promote_low_f32x4))]
4200#[target_feature(enable = "simd128")]
4201#[doc(alias("f32x4.promote_low_f32x4"))]
4202#[stable(feature = "wasm_simd", since = "1.54.0")]
4203pub fn f64x2_promote_low_f32x4(a: v128) -> v128 {
4204    unsafe {
4205        simd_cast::<simd::f32x2, simd::f64x2>(simd_shuffle!(a.as_f32x4(), a.as_f32x4(), [0, 1]))
4206            .v128()
4207    }
4208}
4209
4210#[cfg(test)]
4211mod tests {
4212    use super::*;
4213    use core::ops::{Add, Div, Mul, Neg, Sub};
4214
4215    use std::fmt::Debug;
4216    use std::mem::transmute;
4217    use std::num::Wrapping;
4218    use std::prelude::v1::*;
4219
    // Compile-time checks that each vector constructor is usable in a `const`
    // context (i.e. they are `const fn`s); the values themselves are never read.
    const _C1: v128 = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    const _C2: v128 = u8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    const _C3: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
    const _C4: v128 = u16x8(0, 1, 2, 3, 4, 5, 6, 7);
    const _C5: v128 = i32x4(0, 1, 2, 3);
    const _C6: v128 = u32x4(0, 1, 2, 3);
    const _C7: v128 = i64x2(0, 1);
    const _C8: v128 = u64x2(0, 1);
    const _C9: v128 = f32x4(0.0, 1.0, 2.0, 3.0);
    const _C10: v128 = f64x2(0.0, 1.0);
4230
4231    fn compare_bytes(a: v128, b: v128) {
4232        let a: [u8; 16] = unsafe { transmute(a) };
4233        let b: [u8; 16] = unsafe { transmute(b) };
4234        assert_eq!(a, b);
4235    }
4236
    // Unaligned whole-vector load round-trips the underlying bytes.
    #[test]
    fn test_load() {
        unsafe {
            let arr: [i32; 4] = [0, 1, 2, 3];
            let vec = v128_load(arr.as_ptr() as *const v128);
            compare_bytes(vec, i32x4(0, 1, 2, 3));
        }
    }
4245
    // Widening loads: each narrow element is sign- or zero-extended into a
    // twice-wider lane (e.g. -3i8 -> -3i16 signed, but 253u16 zero-extended).
    #[test]
    fn test_load_extend() {
        unsafe {
            let arr: [i8; 8] = [-3, -2, -1, 0, 1, 2, 3, 4];
            let vec = i16x8_load_extend_i8x8(arr.as_ptr());
            compare_bytes(vec, i16x8(-3, -2, -1, 0, 1, 2, 3, 4));
            let vec = i16x8_load_extend_u8x8(arr.as_ptr() as *const u8);
            compare_bytes(vec, i16x8(253, 254, 255, 0, 1, 2, 3, 4));

            let arr: [i16; 4] = [-1, 0, 1, 2];
            let vec = i32x4_load_extend_i16x4(arr.as_ptr());
            compare_bytes(vec, i32x4(-1, 0, 1, 2));
            let vec = i32x4_load_extend_u16x4(arr.as_ptr() as *const u16);
            compare_bytes(vec, i32x4(65535, 0, 1, 2));

            let arr: [i32; 2] = [-1, 1];
            let vec = i64x2_load_extend_i32x2(arr.as_ptr());
            compare_bytes(vec, i64x2(-1, 1));
            let vec = i64x2_load_extend_u32x2(arr.as_ptr() as *const u32);
            compare_bytes(vec, i64x2(u32::max_value().into(), 1));
        }
    }
4268
    // Splatting loads broadcast one memory element into every lane.
    #[test]
    fn test_load_splat() {
        unsafe {
            compare_bytes(v128_load8_splat(&8), i8x16_splat(8));
            compare_bytes(v128_load16_splat(&9), i16x8_splat(9));
            compare_bytes(v128_load32_splat(&10), i32x4_splat(10));
            compare_bytes(v128_load64_splat(&11), i64x2_splat(11));
        }
    }
4278
    // Zero-extending loads place one element in lane 0 and zero the rest.
    #[test]
    fn test_load_zero() {
        unsafe {
            compare_bytes(v128_load32_zero(&10), i32x4(10, 0, 0, 0));
            compare_bytes(v128_load64_zero(&11), i64x2(11, 0));
        }
    }
4286
    // Whole-vector store overwrites all 16 bytes of the destination.
    #[test]
    fn test_store() {
        unsafe {
            let mut spot = i8x16_splat(0);
            v128_store(&mut spot, i8x16_splat(1));
            compare_bytes(spot, i8x16_splat(1));
        }
    }
4295
    // Lane loads replace exactly one lane of an existing vector; each is
    // checked against the equivalent `*_replace_lane` operation.
    #[test]
    fn test_load_lane() {
        unsafe {
            let zero = i8x16_splat(0);
            compare_bytes(
                v128_load8_lane::<2>(zero, &1),
                i8x16_replace_lane::<2>(zero, 1),
            );

            compare_bytes(
                v128_load16_lane::<2>(zero, &1),
                i16x8_replace_lane::<2>(zero, 1),
            );

            compare_bytes(
                v128_load32_lane::<2>(zero, &1),
                i32x4_replace_lane::<2>(zero, 1),
            );

            compare_bytes(
                v128_load64_lane::<1>(zero, &1),
                i64x2_replace_lane::<1>(zero, 1),
            );
        }
    }
4321
    // Lane stores write exactly one lane's value to memory.
    #[test]
    fn test_store_lane() {
        unsafe {
            let mut spot = 0;
            let zero = i8x16_splat(0);
            v128_store8_lane::<5>(i8x16_replace_lane::<5>(zero, 7), &mut spot);
            assert_eq!(spot, 7);

            let mut spot = 0;
            v128_store16_lane::<5>(i16x8_replace_lane::<5>(zero, 7), &mut spot);
            assert_eq!(spot, 7);

            let mut spot = 0;
            v128_store32_lane::<3>(i32x4_replace_lane::<3>(zero, 7), &mut spot);
            assert_eq!(spot, 7);

            let mut spot = 0;
            v128_store64_lane::<0>(i64x2_replace_lane::<0>(zero, 7), &mut spot);
            assert_eq!(spot, 7);
        }
    }
4343
    // The vector constructors are const-evaluable and lay lanes out in memory
    // in order (lane 0 first, little-endian within lanes).
    #[test]
    fn test_i8x16() {
        const A: v128 = super::i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        compare_bytes(A, A);

        const _: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
        const _: v128 = i32x4(0, 1, 2, 3);
        const _: v128 = i64x2(0, 1);
        const _: v128 = f32x4(0., 1., 2., 3.);
        const _: v128 = f64x2(0., 1.);

        let bytes: [i16; 8] = unsafe { mem::transmute(i16x8(-1, -2, -3, -4, -5, -6, -7, -8)) };
        assert_eq!(bytes, [-1, -2, -3, -4, -5, -6, -7, -8]);
        let bytes: [i8; 16] = unsafe {
            mem::transmute(i8x16(
                -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16,
            ))
        };
        assert_eq!(
            bytes,
            [-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16]
        );
    }
4367
    // Static shuffles: indices 0..N pick from the first operand, N..2N from the
    // second; each width interleaves even lanes of `a` with even lanes of `b`.
    #[test]
    fn test_shuffle() {
        let vec_a = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let vec_b = i8x16(
            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
        );

        let vec_r = i8x16_shuffle::<0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30>(
            vec_a, vec_b,
        );
        let vec_e = i8x16(0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
        compare_bytes(vec_r, vec_e);

        let vec_a = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
        let vec_b = i16x8(8, 9, 10, 11, 12, 13, 14, 15);
        let vec_r = i16x8_shuffle::<0, 8, 2, 10, 4, 12, 6, 14>(vec_a, vec_b);
        let vec_e = i16x8(0, 8, 2, 10, 4, 12, 6, 14);
        compare_bytes(vec_r, vec_e);

        let vec_a = i32x4(0, 1, 2, 3);
        let vec_b = i32x4(4, 5, 6, 7);
        let vec_r = i32x4_shuffle::<0, 4, 2, 6>(vec_a, vec_b);
        let vec_e = i32x4(0, 4, 2, 6);
        compare_bytes(vec_r, vec_e);

        let vec_a = i64x2(0, 1);
        let vec_b = i64x2(2, 3);
        let vec_r = i64x2_shuffle::<0, 2>(vec_a, vec_b);
        let vec_e = i64x2(0, 2);
        compare_bytes(vec_r, vec_e);
    }
4399
    // Generates a test that exercises a matched pair of extract/replace lane
    // intrinsics: every lane extracts the stored value, and replacing a lane is
    // observable through a subsequent extract.
    macro_rules! test_extract {
        (
            name: $test_id:ident,
            extract: $extract:ident,
            replace: $replace:ident,
            elem: $elem:ty,
            count: $count:expr,
            indices: [$($idx:expr),*],
        ) => {
            #[test]
            fn $test_id() {
                unsafe {
                    // All lanes equal: extract must return 123 regardless of index.
                    let arr: [$elem; $count] = [123 as $elem; $count];
                    let vec: v128 = transmute(arr);
                    $(
                        assert_eq!($extract::<$idx>(vec), 123 as $elem);
                    )*

                    // Distinct lanes: lane i holds the value i.
                    let arr: [$elem; $count] = [$($idx as $elem),*];
                    let vec: v128 = transmute(arr);
                    $(
                        assert_eq!($extract::<$idx>(vec), $idx as $elem);

                        let tmp = $replace::<$idx>(vec, 124 as $elem);
                        assert_eq!($extract::<$idx>(tmp), 124 as $elem);
                    )*
                }
            }
        }
    }
4433
    // One extract/replace test per lane width and element type.
    test_extract! {
        name: test_i8x16_extract_replace,
        extract: i8x16_extract_lane,
        replace: i8x16_replace_lane,
        elem: i8,
        count: 16,
        indices: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
    }
    test_extract! {
        name: test_i16x8_extract_replace,
        extract: i16x8_extract_lane,
        replace: i16x8_replace_lane,
        elem: i16,
        count: 8,
        indices: [0, 1, 2, 3, 4, 5, 6, 7],
    }
    test_extract! {
        name: test_i32x4_extract_replace,
        extract: i32x4_extract_lane,
        replace: i32x4_replace_lane,
        elem: i32,
        count: 4,
        indices: [0, 1, 2, 3],
    }
    test_extract! {
        name: test_i64x2_extract_replace,
        extract: i64x2_extract_lane,
        replace: i64x2_replace_lane,
        elem: i64,
        count: 2,
        indices: [0, 1],
    }
    test_extract! {
        name: test_f32x4_extract_replace,
        extract: f32x4_extract_lane,
        replace: f32x4_replace_lane,
        elem: f32,
        count: 4,
        indices: [0, 1, 2, 3],
    }
    test_extract! {
        name: test_f64x2_extract_replace,
        extract: f64x2_extract_lane,
        replace: f64x2_replace_lane,
        elem: f64,
        count: 2,
        indices: [0, 1],
    }
4482
    // Swizzle: each output byte is selected by the corresponding index byte;
    // indices >= 16 (here 32..29) produce 0.
    #[test]
    #[rustfmt::skip]
    fn test_swizzle() {
        compare_bytes(
            i8x16_swizzle(
                i32x4(1, 2, 3, 4),
                i8x16(
                    32, 31, 30, 29,
                    0, 1, 2, 3,
                    12, 13, 14, 15,
                    0, 4, 8, 12),
            ),
            i32x4(0, 1, 4, 0x04030201),
        );
    }
4498
    // Generates a test that splats `$val` with the intrinsic named by the test
    // id and compares the result against an explicit little-endian byte image.
    macro_rules! test_splat {
        ($test_id:ident: $val:expr => $($vals:expr),*) => {
            #[test]
            fn $test_id() {
                let a = super::$test_id($val);
                let b = u8x16($($vals as u8),*);
                compare_bytes(a, b);
            }
        }
    }
4509
    // Expected byte patterns are the little-endian encodings of 42 at each lane
    // width (42.0f32 = 0x422A0000, 42.0f64 = 0x4045000000000000).
    mod splats {
        use super::*;
        test_splat!(i8x16_splat: 42 => 42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42);
        test_splat!(i16x8_splat: 42 => 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0);
        test_splat!(i32x4_splat: 42 => 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0);
        test_splat!(i64x2_splat: 42 => 42, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0);
        test_splat!(f32x4_splat: 42. => 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66);
        test_splat!(f64x2_splat: 42. => 0, 0, 0, 0, 0, 0, 69, 64, 0, 0, 0, 0, 0, 0, 69, 64);
    }
4519
    // Bitmask extracts the sign (top) bit of each lane into one bit of the
    // result, so MAX (sign clear) gives 0 and MIN (sign set) gives all ones.
    #[test]
    fn test_bitmasks() {
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        assert_eq!(i8x16_bitmask(zero), 0);
        assert_eq!(i8x16_bitmask(ones), 0xffff);
        assert_eq!(i8x16_bitmask(i8x16_splat(i8::MAX)), 0);
        assert_eq!(i8x16_bitmask(i8x16_splat(i8::MIN)), 0xffff);
        assert_eq!(i8x16_bitmask(i8x16_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i16x8_bitmask(zero), 0);
        assert_eq!(i16x8_bitmask(ones), 0xff);
        assert_eq!(i16x8_bitmask(i16x8_splat(i16::MAX)), 0);
        assert_eq!(i16x8_bitmask(i16x8_splat(i16::MIN)), 0xff);
        assert_eq!(i16x8_bitmask(i16x8_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i32x4_bitmask(zero), 0);
        assert_eq!(i32x4_bitmask(ones), 0b1111);
        assert_eq!(i32x4_bitmask(i32x4_splat(i32::MAX)), 0);
        assert_eq!(i32x4_bitmask(i32x4_splat(i32::MIN)), 0b1111);
        assert_eq!(i32x4_bitmask(i32x4_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i64x2_bitmask(zero), 0);
        assert_eq!(i64x2_bitmask(ones), 0b11);
        assert_eq!(i64x2_bitmask(i64x2_splat(i64::MAX)), 0);
        assert_eq!(i64x2_bitmask(i64x2_splat(i64::MIN)), 0b11);
        assert_eq!(i64x2_bitmask(i64x2_replace_lane::<1>(zero, -1)), 0b10);
    }
4549
    // Narrowing saturates each wide lane into the narrower signed/unsigned
    // range; the first operand fills the low half of the result, the second the
    // high half.
    #[test]
    fn test_narrow() {
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        compare_bytes(i8x16_narrow_i16x8(zero, zero), zero);
        compare_bytes(u8x16_narrow_i16x8(zero, zero), zero);
        compare_bytes(i8x16_narrow_i16x8(ones, ones), ones);
        // -1 saturates to 0 when narrowing to unsigned.
        compare_bytes(u8x16_narrow_i16x8(ones, ones), zero);

        compare_bytes(
            i8x16_narrow_i16x8(
                i16x8(
                    0,
                    1,
                    2,
                    -1,
                    i8::MIN.into(),
                    i8::MAX.into(),
                    u8::MIN.into(),
                    u8::MAX.into(),
                ),
                i16x8(
                    i16::MIN,
                    i16::MAX,
                    u16::MIN as i16,
                    u16::MAX as i16,
                    0,
                    0,
                    0,
                    0,
                ),
            ),
            i8x16(0, 1, 2, -1, -128, 127, 0, 127, -128, 127, 0, -1, 0, 0, 0, 0),
        );

        compare_bytes(
            u8x16_narrow_i16x8(
                i16x8(
                    0,
                    1,
                    2,
                    -1,
                    i8::MIN.into(),
                    i8::MAX.into(),
                    u8::MIN.into(),
                    u8::MAX.into(),
                ),
                i16x8(
                    i16::MIN,
                    i16::MAX,
                    u16::MIN as i16,
                    u16::MAX as i16,
                    0,
                    0,
                    0,
                    0,
                ),
            ),
            i8x16(0, 1, 2, 0, 0, 127, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0),
        );

        compare_bytes(i16x8_narrow_i32x4(zero, zero), zero);
        compare_bytes(u16x8_narrow_i32x4(zero, zero), zero);
        compare_bytes(i16x8_narrow_i32x4(ones, ones), ones);
        compare_bytes(u16x8_narrow_i32x4(ones, ones), zero);

        compare_bytes(
            i16x8_narrow_i32x4(
                i32x4(0, -1, i16::MIN.into(), i16::MAX.into()),
                i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
            ),
            i16x8(0, -1, i16::MIN, i16::MAX, i16::MIN, i16::MAX, 0, -1),
        );

        compare_bytes(
            u16x8_narrow_i32x4(
                i32x4(u16::MAX.into(), -1, i16::MIN.into(), i16::MAX.into()),
                i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
            ),
            i16x8(-1, 0, 0, i16::MAX, 0, -1, 0, 0),
        );
    }
4633
    // Extending: signed extension of all-ones stays all-ones (-1), while
    // unsigned extension yields the narrow type's MAX in each wide lane.
    #[test]
    fn test_extend() {
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        compare_bytes(i16x8_extend_low_i8x16(zero), zero);
        compare_bytes(i16x8_extend_high_i8x16(zero), zero);
        compare_bytes(i16x8_extend_low_u8x16(zero), zero);
        compare_bytes(i16x8_extend_high_u8x16(zero), zero);
        compare_bytes(i16x8_extend_low_i8x16(ones), ones);
        compare_bytes(i16x8_extend_high_i8x16(ones), ones);
        let halves = u16x8_splat(u8::MAX.into());
        compare_bytes(i16x8_extend_low_u8x16(ones), halves);
        compare_bytes(i16x8_extend_high_u8x16(ones), halves);

        compare_bytes(i32x4_extend_low_i16x8(zero), zero);
        compare_bytes(i32x4_extend_high_i16x8(zero), zero);
        compare_bytes(i32x4_extend_low_u16x8(zero), zero);
        compare_bytes(i32x4_extend_high_u16x8(zero), zero);
        compare_bytes(i32x4_extend_low_i16x8(ones), ones);
        compare_bytes(i32x4_extend_high_i16x8(ones), ones);
        let halves = u32x4_splat(u16::MAX.into());
        compare_bytes(i32x4_extend_low_u16x8(ones), halves);
        compare_bytes(i32x4_extend_high_u16x8(ones), halves);

        compare_bytes(i64x2_extend_low_i32x4(zero), zero);
        compare_bytes(i64x2_extend_high_i32x4(zero), zero);
        compare_bytes(i64x2_extend_low_u32x4(zero), zero);
        compare_bytes(i64x2_extend_high_u32x4(zero), zero);
        compare_bytes(i64x2_extend_low_i32x4(ones), ones);
        compare_bytes(i64x2_extend_high_i32x4(ones), ones);
        let halves = i64x2_splat(u32::MAX.into());
        compare_bytes(u64x2_extend_low_u32x4(ones), halves);
        compare_bytes(u64x2_extend_high_u32x4(ones), halves);
    }
4669
    // Dot product of adjacent i16 pairs: (-1 * -1) + (-1 * -1) = 2 per lane.
    #[test]
    fn test_dot() {
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);
        let two = i32x4_splat(2);
        compare_bytes(i32x4_dot_i16x8(zero, zero), zero);
        compare_bytes(i32x4_dot_i16x8(ones, ones), two);
    }
4678
    // Generates tests comparing a SIMD binary intrinsic `$f` against the scalar
    // operator method `$op` applied lane by lane.
    macro_rules! test_binop {
        (
            $($name:ident => {
                $([$($vec1:tt)*] ($op:ident | $f:ident) [$($vec2:tt)*],)*
            })*
        ) => ($(
            #[test]
            fn $name() {
                unsafe {
                    $(
                        let v1 = [$($vec1)*];
                        let v2 = [$($vec2)*];
                        let v1_v128: v128 = mem::transmute(v1);
                        let v2_v128: v128 = mem::transmute(v2);
                        let v3_v128 = super::$f(v1_v128, v2_v128);
                        // Initialize `v3` (value immediately overwritten) so its
                        // array type is inferred; `_ignore` silences the
                        // unused-assignment lint on that initializer.
                        let mut v3 = [$($vec1)*];
                        let _ignore = v3;
                        v3 = mem::transmute(v3_v128);

                        for (i, actual) in v3.iter().enumerate() {
                            let expected = v1[i].$op(v2[i]);
                            assert_eq!(*actual, expected);
                        }
                    )*
                }
            }
        )*)
    }
4707
    // Unary-operation analogue of `test_binop!`: one `#[test]` per `$name`,
    // each case checking intrinsic `$f` lane-by-lane against scalar method
    // `$op` applied to the input array.
    macro_rules! test_unop {
        (
            $($name:ident => {
                $(($op:ident | $f:ident) [$($vec1:tt)*],)*
            })*
        ) => ($(
            #[test]
            fn $name() {
                unsafe {
                    $(
                        let v1 = [$($vec1)*];
                        let v1_v128: v128 = mem::transmute(v1);
                        let v2_v128 = super::$f(v1_v128);
                        // Initialize from `$vec1` purely for array-type
                        // inference; the value is overwritten right away.
                        let mut v2 = [$($vec1)*];
                        // Suppress the unused-initializer lint.
                        let _ignore = v2;
                        v2 = mem::transmute(v2_v128);

                        for (i, actual) in v2.iter().enumerate() {
                            let expected = v1[i].$op();
                            assert_eq!(*actual, expected);
                        }
                    )*
                }
            }
        )*)
    }
4734
    // Scalar reference implementation of the wasm rounding-average (`avgr_u`)
    // operation; used as the `$op` in the `test_binop!` cases below.
    trait Avgr: Sized {
        fn avgr(self, other: Self) -> Self;
    }
4738
    macro_rules! impl_avgr {
        ($($i:ident)*) => ($(impl Avgr for $i {
            fn avgr(self, other: Self) -> Self {
                // Widen to u64 so `self + other + 1` cannot overflow, then
                // halve: this is the round-half-up average that the wasm
                // `avgr_u` instructions compute.
                ((self as u64 + other as u64 + 1) / 2) as $i
            }
        })*)
    }

    // Only the unsigned element types have `avgr` instructions.
    impl_avgr!(u8 u16);
4748
    // Lane-wise binary-operation tests. Each case pairs a wasm intrinsic with
    // the scalar std method (or the `Avgr` helper) that defines its per-lane
    // semantics; `test_binop!` checks every lane. The third vector of the i8
    // cases mixes signs and magnitudes to exercise wrapping and saturation.
    test_binop! {
        test_i8x16_add => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_add | i8x16_add)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_add | i8x16_add)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_add | i8x16_add)
            [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
        }

        test_i8x16_add_sat_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | i8x16_add_sat)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | i8x16_add_sat)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | i8x16_add_sat)
            [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
        }

        test_i8x16_add_sat_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | u8x16_add_sat)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | u8x16_add_sat)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | u8x16_add_sat)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_sub => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_sub | i8x16_sub)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_sub | i8x16_sub)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_sub | i8x16_sub)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_sub_sat_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | i8x16_sub_sat)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | i8x16_sub_sat)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | i8x16_sub_sat)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_sub_sat_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | u8x16_sub_sat)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | u8x16_sub_sat)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | u8x16_sub_sat)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_min_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (min | i8x16_min)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | i8x16_min)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | i8x16_min)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_min_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (min | u8x16_min)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | u8x16_min)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | u8x16_min)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_max_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (max | i8x16_max)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | i8x16_max)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | i8x16_max)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_max_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (max | u8x16_max)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | u8x16_max)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | u8x16_max)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_avgr_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (avgr | u8x16_avgr)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (avgr | u8x16_avgr)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (avgr | u8x16_avgr)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i16x8_add => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_add | i16x8_add)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (wrapping_add | i16x8_add)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_add_sat_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | i16x8_add_sat)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_add | i16x8_add_sat)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_add_sat_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | u16x8_add_sat)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_add | u16x8_add_sat)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_sub => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_sub | i16x8_sub)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (wrapping_sub | i16x8_sub)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_sub_sat_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | i16x8_sub_sat)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_sub | i16x8_sub_sat)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_sub_sat_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | u16x8_sub_sat)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_sub | u16x8_sub_sat)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_mul => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_mul | i16x8_mul)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (wrapping_mul | i16x8_mul)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_min_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (min | i16x8_min)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (min | i16x8_min)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_min_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (min | u16x8_min)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (min | u16x8_min)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_max_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (max | i16x8_max)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (max | i16x8_max)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_max_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (max | u16x8_max)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (max | u16x8_max)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_avgr_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (avgr | u16x8_avgr)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (avgr | u16x8_avgr)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i32x4_add => {
            [0i32, 0, 0, 0] (wrapping_add | i32x4_add) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (wrapping_add | i32x4_add)
            [i32::MAX; 4],
        }

        test_i32x4_sub => {
            [0i32, 0, 0, 0] (wrapping_sub | i32x4_sub) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (wrapping_sub | i32x4_sub)
            [i32::MAX; 4],
        }

        test_i32x4_mul => {
            [0i32, 0, 0, 0] (wrapping_mul | i32x4_mul) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (wrapping_mul | i32x4_mul)
            [i32::MAX; 4],
        }

        test_i32x4_min_s => {
            [0i32, 0, 0, 0] (min | i32x4_min) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (min | i32x4_min)
            [i32::MAX; 4],
        }

        test_i32x4_min_u => {
            [0u32, 0, 0, 0] (min | u32x4_min) [1, 2, 3, 4],
            [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
                (min | u32x4_min)
            [i32::MAX as u32; 4],
        }

        test_i32x4_max_s => {
            [0i32, 0, 0, 0] (max | i32x4_max) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (max | i32x4_max)
            [i32::MAX; 4],
        }

        test_i32x4_max_u => {
            [0u32, 0, 0, 0] (max | u32x4_max) [1, 2, 3, 4],
            [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
                (max | u32x4_max)
            [i32::MAX as u32; 4],
        }

        test_i64x2_add => {
            [0i64, 0] (wrapping_add | i64x2_add) [1, 2],
            [i64::MIN, i64::MAX] (wrapping_add | i64x2_add) [i64::MAX, i64::MIN],
            [i64::MAX; 2] (wrapping_add | i64x2_add) [i64::MAX; 2],
            [-4i64, -4] (wrapping_add | i64x2_add) [800, 939],
        }

        test_i64x2_sub => {
            [0i64, 0] (wrapping_sub | i64x2_sub) [1, 2],
            [i64::MIN, i64::MAX] (wrapping_sub | i64x2_sub) [i64::MAX, i64::MIN],
            [i64::MAX; 2] (wrapping_sub | i64x2_sub) [i64::MAX; 2],
            [-4i64, -4] (wrapping_sub | i64x2_sub) [800, 939],
        }

        test_i64x2_mul => {
            [0i64, 0] (wrapping_mul | i64x2_mul) [1, 2],
            [i64::MIN, i64::MAX] (wrapping_mul | i64x2_mul) [i64::MAX, i64::MIN],
            [i64::MAX; 2] (wrapping_mul | i64x2_mul) [i64::MAX; 2],
            [-4i64, -4] (wrapping_mul | i64x2_mul) [800, 939],
        }

        // Float cases compare with plain `assert_eq!` inside `test_binop!`, so
        // the input pairs are chosen to never produce NaN (NaN != NaN would
        // make the assertion fail even for a correct implementation).
        test_f32x4_add => {
            [-1.0f32, 2.0, 3.0, 4.0] (add | f32x4_add) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (add | f32x4_add)
            [1., 2., 0., 0.],
        }

        test_f32x4_sub => {
            [-1.0f32, 2.0, 3.0, 4.0] (sub | f32x4_sub) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (sub | f32x4_sub)
            [1., 2., 0., 0.],
        }

        test_f32x4_mul => {
            [-1.0f32, 2.0, 3.0, 4.0] (mul | f32x4_mul) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (mul | f32x4_mul)
            [1., 2., 1., 0.],
        }

        test_f32x4_div => {
            [-1.0f32, 2.0, 3.0, 4.0] (div | f32x4_div) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (div | f32x4_div)
            [1., 2., 0., 0.],
        }

        test_f32x4_min => {
            [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_min) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (min | f32x4_min)
            [1., 2., 0., 0.],
        }

        test_f32x4_max => {
            [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_max) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (max | f32x4_max)
            [1., 2., 0., 0.],
        }

        // NOTE(review): `pmin`/`pmax` differ from scalar `min`/`max` for NaN
        // and for (-0.0, +0.0) pairs; these inputs avoid both, so comparing
        // against `min`/`max` is valid here — keep it that way.
        test_f32x4_pmin => {
            [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_pmin) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (min | f32x4_pmin)
            [1., 2., 0., 0.],
        }

        test_f32x4_pmax => {
            [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_pmax) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (max | f32x4_pmax)
            [1., 2., 0., 0.],
        }

        test_f64x2_add => {
            [-1.0f64, 2.0] (add | f64x2_add) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (add | f64x2_add) [1., 2.],
        }

        test_f64x2_sub => {
            [-1.0f64, 2.0] (sub | f64x2_sub) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (sub | f64x2_sub) [1., 2.],
        }

        test_f64x2_mul => {
            [-1.0f64, 2.0] (mul | f64x2_mul) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (mul | f64x2_mul) [1., 2.],
        }

        test_f64x2_div => {
            [-1.0f64, 2.0] (div | f64x2_div) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (div | f64x2_div) [1., 2.],
        }

        test_f64x2_min => {
            [-1.0f64, 2.0] (min | f64x2_min) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_min) [1., 2.],
        }

        test_f64x2_max => {
            [-1.0f64, 2.0] (max | f64x2_max) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_max) [1., 2.],
        }

        test_f64x2_pmin => {
            [-1.0f64, 2.0] (min | f64x2_pmin) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_pmin) [1., 2.],
        }

        test_f64x2_pmax => {
            [-1.0f64, 2.0] (max | f64x2_pmax) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_pmax) [1., 2.],
        }
    }
5190
    // Lane-wise unary-operation tests: each case pairs a wasm intrinsic with
    // the scalar std method defining its per-lane semantics. Integer abs/neg
    // use the `wrapping_*` forms because the wasm ops wrap (e.g. abs(MIN) ==
    // MIN).
    test_unop! {
        test_i8x16_abs => {
            (wrapping_abs | i8x16_abs)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            (wrapping_abs | i8x16_abs)
            [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            (wrapping_abs | i8x16_abs)
            [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
        }

        test_i8x16_neg => {
            (wrapping_neg | i8x16_neg)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            (wrapping_neg | i8x16_neg)
            [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            (wrapping_neg | i8x16_neg)
            [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
        }

        test_i16x8_abs => {
            (wrapping_abs | i16x8_abs) [1i16, 1, 1, 1, 1, 1, 1, 1],
            (wrapping_abs | i16x8_abs) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
        }

        test_i16x8_neg => {
            (wrapping_neg | i16x8_neg) [1i16, 1, 1, 1, 1, 1, 1, 1],
            (wrapping_neg | i16x8_neg) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
        }

        test_i32x4_abs => {
            (wrapping_abs | i32x4_abs) [1i32, 2, 3, 4],
            (wrapping_abs | i32x4_abs) [i32::MIN, i32::MAX, 0, 4],
        }

        test_i32x4_neg => {
            (wrapping_neg | i32x4_neg) [1i32, 2, 3, 4],
            (wrapping_neg | i32x4_neg) [i32::MIN, i32::MAX, 0, 4],
        }

        test_i64x2_abs => {
            (wrapping_abs | i64x2_abs) [1i64, 2],
            (wrapping_abs | i64x2_abs) [i64::MIN, i64::MAX],
        }

        test_i64x2_neg => {
            (wrapping_neg | i64x2_neg) [1i64, 2],
            (wrapping_neg | i64x2_neg) [i64::MIN, i64::MAX],
        }

        test_f32x4_ceil => {
            (ceil | f32x4_ceil) [1.0f32, 2., 2.5, 3.3],
            (ceil | f32x4_ceil) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_floor => {
            (floor | f32x4_floor) [1.0f32, 2., 2.5, 3.3],
            (floor | f32x4_floor) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_trunc => {
            (trunc | f32x4_trunc) [1.0f32, 2., 2.5, 3.3],
            (trunc | f32x4_trunc) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        // NOTE(review): scalar `round` rounds half away from zero while wasm
        // `nearest` rounds ties to even; the inputs here deliberately avoid
        // exact .5 halfway values so the two agree — keep it that way.
        test_f32x4_nearest => {
            (round | f32x4_nearest) [1.0f32, 2., 2.6, 3.3],
            (round | f32x4_nearest) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_abs => {
            (abs | f32x4_abs) [1.0f32, 2., 2.6, 3.3],
            (abs | f32x4_abs) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_neg => {
            (neg | f32x4_neg) [1.0f32, 2., 2.6, 3.3],
            (neg | f32x4_neg) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        // Only non-negative inputs: sqrt of a negative would be NaN, which
        // `assert_eq!` cannot compare.
        test_f32x4_sqrt => {
            (sqrt | f32x4_sqrt) [1.0f32, 2., 2.6, 3.3],
            (sqrt | f32x4_sqrt) [0.0, 0.3, f32::INFINITY, 0.1],
        }

        test_f64x2_ceil => {
            (ceil | f64x2_ceil) [1.0f64, 2.3],
            (ceil | f64x2_ceil) [f64::INFINITY, -0.1],
        }

        test_f64x2_floor => {
            (floor | f64x2_floor) [1.0f64, 2.3],
            (floor | f64x2_floor) [f64::INFINITY, -0.1],
        }

        test_f64x2_trunc => {
            (trunc | f64x2_trunc) [1.0f64, 2.3],
            (trunc | f64x2_trunc) [f64::INFINITY, -0.1],
        }

        // NOTE(review): same `round` vs `nearest` tie-breaking caveat as the
        // f32x4 case above; inputs avoid .5 ties.
        test_f64x2_nearest => {
            (round | f64x2_nearest) [1.0f64, 2.3],
            (round | f64x2_nearest) [f64::INFINITY, -0.1],
        }

        test_f64x2_abs => {
            (abs | f64x2_abs) [1.0f64, 2.3],
            (abs | f64x2_abs) [f64::INFINITY, -0.1],
        }

        test_f64x2_neg => {
            (neg | f64x2_neg) [1.0f64, 2.3],
            (neg | f64x2_neg) [f64::INFINITY, -0.1],
        }

        test_f64x2_sqrt => {
            (sqrt | f64x2_sqrt) [1.0f64, 2.3],
            (sqrt | f64x2_sqrt) [f64::INFINITY, 0.1],
        }
    }
5314
    // Compile-time type check used by `test_bop!`: expands to `true` for the
    // two floating-point element types and `false` for anything else. The
    // specific `f32`/`f64` rules must stay before the catch-all `$id` rule,
    // since macro rules are tried in order.
    macro_rules! floating_point {
        (f32) => {
            true
        };
        (f64) => {
            true
        };
        ($id:ident) => {
            false
        };
    }
5326
    // Lets `test_bop!` call `.is_nan()` uniformly on every element type; the
    // integer impls use the default body, as integers are never NaN. Floats
    // use their inherent `is_nan`, so no impl is needed here.
    trait IsNan: Sized {
        fn is_nan(self) -> bool {
            false
        }
    }
    impl IsNan for i8 {}
    impl IsNan for i16 {}
    impl IsNan for i32 {}
    impl IsNan for i64 {}
5336
    // Binary-op test generator with an explicit expected-output array (unlike
    // `test_binop!`, which derives expectations from a scalar op). The first
    // rule, without `=> $oty`, forwards to the second with the output type
    // equal to the element type.
    macro_rules! test_bop {
         ($id:ident[$ety:ident; $ecount:expr] |
          $binary_op:ident [$op_test_id:ident] :
          ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
             test_bop!(
                 $id[$ety; $ecount] => $ety | $binary_op [ $op_test_id ]:
                 ([$($in_a),*], [$($in_b),*]) => [$($out),*]
             );

         };
         ($id:ident[$ety:ident; $ecount:expr] => $oty:ident |
          $binary_op:ident [$op_test_id:ident] :
          ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
             #[test]
             fn $op_test_id() {
                 unsafe {
                     let a_input: [$ety; $ecount] = [$($in_a),*];
                     let b_input: [$ety; $ecount] = [$($in_b),*];
                     let output: [$oty; $ecount] = [$($out),*];

                     let a_vec_in: v128 = transmute(a_input);
                     let b_vec_in: v128 = transmute(b_input);
                     let vec_res: v128 = $binary_op(a_vec_in, b_vec_in);

                     let res: [$oty; $ecount] = transmute(vec_res);

                     if !floating_point!($ety) {
                         assert_eq!(res, output);
                     } else {
                         // Float lanes: NaN != NaN, so compare NaN-ness first
                         // and only compare values of non-NaN lanes.
                         for i in 0..$ecount {
                             let r = res[i];
                             let o = output[i];
                             assert_eq!(r.is_nan(), o.is_nan());
                             if !r.is_nan() {
                                 assert_eq!(r, o);
                             }
                         }
                     }
                 }
             }
         }
     }
5379
    // Like `test_bop!` but the second operand `$in_b` is a plain scalar (e.g.
    // a shift amount) passed straight to the intrinsic, not a vector.
    macro_rules! test_bops {
         ($id:ident[$ety:ident; $ecount:expr] |
          $binary_op:ident [$op_test_id:ident]:
          ([$($in_a:expr),*], $in_b:expr) => [$($out:expr),*]) => {
             #[test]
             fn $op_test_id() {
                 unsafe {
                     let a_input: [$ety; $ecount] = [$($in_a),*];
                     let output: [$ety; $ecount] = [$($out),*];

                     let a_vec_in: v128 = transmute(a_input);
                     let vec_res: v128 = $binary_op(a_vec_in, $in_b);

                     let res: [$ety; $ecount] = transmute(vec_res);
                     assert_eq!(res, output);
                 }
             }
         }
     }
5399
    // Unary-op test generator with an explicit expected-output array.
    macro_rules! test_uop {
         ($id:ident[$ety:ident; $ecount:expr] |
          $unary_op:ident [$op_test_id:ident]: [$($in_a:expr),*] => [$($out:expr),*]) => {
             #[test]
             fn $op_test_id() {
                 unsafe {
                     let a_input: [$ety; $ecount] = [$($in_a),*];
                     let output: [$ety; $ecount] = [$($out),*];

                     let a_vec_in: v128 = transmute(a_input);
                     let vec_res: v128 = $unary_op(a_vec_in);

                     let res: [$ety; $ecount] = transmute(vec_res);
                     assert_eq!(res, output);
                 }
             }
         }
     }
5418
    // Shift-left by one: every lane doubles; MAX wraps to -2 in the signed
    // bit pattern.
    test_bops!(i8x16[i8; 16] | i8x16_shl[i8x16_shl_test]:
               ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
               [0, -2, 4, 6, 8, 10, 12, -2, 2, 2, 2, 2, 2, 2, 2, 2]);
    test_bops!(i16x8[i16; 8] | i16x8_shl[i16x8_shl_test]:
                ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
                [0, -2, 4, 6, 8, 10, 12, -2]);
    test_bops!(i32x4[i32; 4] | i32x4_shl[i32x4_shl_test]:
                ([0, -1, 2, 3], 1) => [0, -2, 4, 6]);
    test_bops!(i64x2[i64; 2] | i64x2_shl[i64x2_shl_test]:
                ([0, -1], 1) => [0, -2]);

    // Arithmetic (sign-extending) right shift by one: -1 stays -1, positive
    // lanes halve (rounding toward negative infinity).
    test_bops!(i8x16[i8; 16] | i8x16_shr[i8x16_shr_s_test]:
               ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
               [0, -1, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
    test_bops!(i16x8[i16; 8] | i16x8_shr[i16x8_shr_s_test]:
               ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
               [0, -1, 1, 1, 2, 2, 3, i16::MAX / 2]);
    test_bops!(i32x4[i32; 4] | i32x4_shr[i32x4_shr_s_test]:
               ([0, -1, 2, 3], 1) => [0, -1, 1, 1]);
    test_bops!(i64x2[i64; 2] | i64x2_shr[i64x2_shr_s_test]:
               ([0, -1], 1) => [0, -1]);
5440
5441    test_bops!(i8x16[i8; 16] | u8x16_shr[i8x16_uhr_u_test]:
5442                ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
5443                [0, i8::MAX, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
5444    test_bops!(i16x8[i16; 8] | u16x8_shr[i16x8_uhr_u_test]:
5445                ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
5446                [0, i16::MAX, 1, 1, 2, 2, 3, i16::MAX / 2]);
5447    test_bops!(i32x4[i32; 4] | u32x4_shr[i32x4_uhr_u_test]:
5448                ([0, -1, 2, 3], 1) => [0, i32::MAX, 1, 1]);
5449    test_bops!(i64x2[i64; 2] | u64x2_shr[i64x2_uhr_u_test]:
5450                ([0, -1], 1) => [0, i64::MAX]);
5451
5452    #[test]
5453    fn v128_bitwise_logical_ops() {
5454        unsafe {
5455            let a: [u32; 4] = [u32::MAX, 0, u32::MAX, 0];
5456            let b: [u32; 4] = [u32::MAX; 4];
5457            let c: [u32; 4] = [0; 4];
5458
5459            let vec_a: v128 = transmute(a);
5460            let vec_b: v128 = transmute(b);
5461            let vec_c: v128 = transmute(c);
5462
5463            let r: v128 = v128_and(vec_a, vec_a);
5464            compare_bytes(r, vec_a);
5465            let r: v128 = v128_and(vec_a, vec_b);
5466            compare_bytes(r, vec_a);
5467            let r: v128 = v128_andnot(vec_a, vec_b);
5468            compare_bytes(r, vec_c);
5469            let r: v128 = v128_andnot(vec_a, vec_a);
5470            compare_bytes(r, vec_c);
5471            let r: v128 = v128_andnot(vec_a, vec_c);
5472            compare_bytes(r, vec_a);
5473            let r: v128 = v128_or(vec_a, vec_b);
5474            compare_bytes(r, vec_b);
5475            let r: v128 = v128_not(vec_b);
5476            compare_bytes(r, vec_c);
5477            let r: v128 = v128_xor(vec_a, vec_c);
5478            compare_bytes(r, vec_a);
5479
5480            let r: v128 = v128_bitselect(vec_b, vec_c, vec_b);
5481            compare_bytes(r, vec_b);
5482            let r: v128 = v128_bitselect(vec_b, vec_c, vec_c);
5483            compare_bytes(r, vec_c);
5484            let r: v128 = v128_bitselect(vec_b, vec_c, vec_a);
5485            compare_bytes(r, vec_a);
5486        }
5487    }
5488
5489    macro_rules! test_bool_red {
5490         ([$test_id:ident, $any:ident, $all:ident] | [$($true:expr),*] | [$($false:expr),*] | [$($alt:expr),*]) => {
5491             #[test]
5492             fn $test_id() {
5493                 unsafe {
5494                     let vec_a: v128 = transmute([$($true),*]); let vec_b: v128 = transmute([$($false),*]); let vec_c: v128 = transmute([$($alt),*]); assert_eq!($all(vec_a), true);
5504                     assert_eq!($all(vec_b), false);
5505                     assert_eq!($all(vec_c), false);
5506                 }
5507             }
5508         }
5509     }
5510
    // Instantiations of the reduction test for each integer lane width, with
    // all-true, all-false, and alternating lane patterns.
    test_bool_red!(
        [i8x16_boolean_reductions, v128_any_true, i8x16_all_true]
            | [1_i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
            | [0_i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            | [1_i8, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
    );
    test_bool_red!(
        [i16x8_boolean_reductions, v128_any_true, i16x8_all_true]
            | [1_i16, 1, 1, 1, 1, 1, 1, 1]
            | [0_i16, 0, 0, 0, 0, 0, 0, 0]
            | [1_i16, 0, 1, 0, 1, 0, 1, 0]
    );
    test_bool_red!(
        [i32x4_boolean_reductions, v128_any_true, i32x4_all_true]
            | [1_i32, 1, 1, 1]
            | [0_i32, 0, 0, 0]
            | [1_i32, 0, 1, 0]
    );
    test_bool_red!(
        [i64x2_boolean_reductions, v128_any_true, i64x2_all_true]
            | [1_i64, 1]
            | [0_i64, 0]
            | [1_i64, 0]
    );
5535
    // Lane-wise equality: equal lanes yield an all-ones mask (-1), unequal
    // lanes yield 0. Float variants produce integer masks of the same width.
    test_bop!(i8x16[i8; 16] | i8x16_eq[i8x16_eq_test]:
              ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
               [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
              [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_eq[i16x8_eq_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_eq[i32x4_eq_test]:
               ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
    test_bop!(i64x2[i64; 2] | i64x2_eq[i64x2_eq_test]:
               ([0, 1], [0, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_eq[f32x4_eq_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_eq[f64x2_eq_test]: ([0., 1.], [0., 2.]) => [-1, 0]);
5550
    // Lane-wise inequality: the exact bitwise complement of the eq masks above
    // for the same inputs.
    test_bop!(i8x16[i8; 16] | i8x16_ne[i8x16_ne_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_ne[i16x8_ne_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_ne[i32x4_ne_test]:
               ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_ne[i64x2_ne_test]:
               ([0, 1], [0, 2]) => [0, -1]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_ne[f32x4_ne_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_ne[f64x2_ne_test]: ([0., 1.], [0., 2.]) => [0, -1]);
5565
    // Lane-wise less-than. The negative lanes (e.g. -12, -7, -1) are where the
    // signed (_s) and unsigned (_u) variants diverge: reinterpreted as
    // unsigned, a negative value becomes very large and flips the comparison.
    test_bop!(i8x16[i8; 16] | i8x16_lt[i8x16_lt_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1, -1, -1, 0, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_lt[i8x16_lt_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_lt[i16x8_lt_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i16x8[i16; 8] | u16x8_lt[i16x8_lt_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_lt[i32x4_lt_s_test]:
               ([-1, 1, 2, 3], [0, 2, 2, 4]) => [-1, -1, 0, -1]);
    test_bop!(i32x4[i32; 4] | u32x4_lt[i32x4_lt_u_test]:
               ([-1, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_lt[i64x2_lt_s_test]:
               ([-1, 3], [0, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_lt[f32x4_lt_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_lt[f64x2_lt_test]: ([0., 1.], [0., 2.]) => [0, -1]);
5589
    // Lane-wise greater-than; same signed-vs-unsigned divergence as the lt
    // tests, exercised via the negative lanes (-15, -7, -4, -1).
    test_bop!(i8x16[i8; 16] | i8x16_gt[i8x16_gt_s_test]:
           ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_gt[i8x16_gt_u_test]:
           ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_gt[i16x8_gt_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | u16x8_gt[i16x8_gt_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_gt[i32x4_gt_s_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | u32x4_gt[i32x4_gt_u_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_gt[i64x2_gt_s_test]:
               ([-1, 2], [0, 1]) => [0, -1]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_gt[f32x4_gt_test]:
               ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_gt[f64x2_gt_test]: ([0., 2.], [0., 1.]) => [0, -1]);
5613
    // Lane-wise greater-or-equal; equal lanes now also produce -1, and the
    // negative lanes again separate the signed and unsigned variants.
    test_bop!(i8x16[i8; 16] | i8x16_ge[i8x16_ge_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_ge[i8x16_ge_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_ge[i16x8_ge_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i16x8[i16; 8] | u16x8_ge[i16x8_ge_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_ge[i32x4_ge_s_test]:
               ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
    test_bop!(i32x4[i32; 4] | u32x4_ge[i32x4_ge_u_test]:
               ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_ge[i64x2_ge_s_test]:
               ([0, 1], [-1, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_ge[f32x4_ge_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_ge[f64x2_ge_test]: ([0., 1.], [0., 2.]) => [-1, 0]);
5637
    // Lane-wise less-or-equal.
    test_bop!(i8x16[i8; 16] | i8x16_le[i8x16_le_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
               ) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i8x16[i8; 16] | u8x16_le[i8x16_le_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
               ) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_le[i16x8_le_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | u16x8_le[i16x8_le_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_le[i32x4_le_s_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | u32x4_le[i32x4_le_u_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, 0]);
    test_bop!(i64x2[i64; 2] | i64x2_le[i64x2_le_s_test]:
               ([0, 2], [0, 1]) => [-1, 0]);
    // NOTE(review): `-0` in the expected mask below is the integer literal 0
    // (the masks here are i32), not a float negative zero — harmless, but
    // easy to misread.
    test_bop!(f32x4[f32; 4] => i32 | f32x4_le[f32x4_le_test]:
               ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [-1, 0, -1, -0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_le[f64x2_le_test]: ([0., 2.], [0., 1.]) => [-1, 0]);
5663
    // f32x4 lane-wise arithmetic. The *_nan variants put f32::NAN in one input
    // lane and expect NaN in the corresponding output lane of min/max.
    test_uop!(f32x4[f32; 4] | f32x4_neg[f32x4_neg_test]: [0., 1., 2., 3.] => [ 0., -1., -2., -3.]);
    test_uop!(f32x4[f32; 4] | f32x4_abs[f32x4_abs_test]: [0., -1., 2., -3.] => [ 0., 1., 2., 3.]);
    test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., -3., -4., 8.]);
    test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test_nan]:
              ([0., -1., 7., 8.], [1., -3., -4., f32::NAN])
              => [0., -3., -4., f32::NAN]);
    test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -1., 7., 10.]);
    test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test_nan]:
              ([0., -1., 7., 8.], [1., -3., -4., f32::NAN])
              => [1., -1., 7., f32::NAN]);
    test_bop!(f32x4[f32; 4] | f32x4_add[f32x4_add_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -4., 3., 18.]);
    test_bop!(f32x4[f32; 4] | f32x4_sub[f32x4_sub_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [-1., 2., 11., -2.]);
    test_bop!(f32x4[f32; 4] | f32x4_mul[f32x4_mul_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., 3., -28., 80.]);
    test_bop!(f32x4[f32; 4] | f32x4_div[f32x4_div_test]:
              ([0., -8., 70., 8.], [1., 4., 10., 2.]) => [0., -2., 7., 4.]);
5684
    // f64x2 lane-wise arithmetic, mirroring the f32x4 cases above, including
    // the NaN-in-one-lane min/max variants.
    test_uop!(f64x2[f64; 2] | f64x2_neg[f64x2_neg_test]: [0., 1.] => [ 0., -1.]);
    test_uop!(f64x2[f64; 2] | f64x2_abs[f64x2_abs_test]: [0., -1.] => [ 0., 1.]);
    test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test]:
               ([0., -1.], [1., -3.]) => [0., -3.]);
    test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test_nan]:
               ([7., 8.], [-4., f64::NAN])
               => [ -4., f64::NAN]);
    test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test]:
               ([0., -1.], [1., -3.]) => [1., -1.]);
    test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test_nan]:
               ([7., 8.], [ -4., f64::NAN])
               => [7., f64::NAN]);
    test_bop!(f64x2[f64; 2] | f64x2_add[f64x2_add_test]:
               ([0., -1.], [1., -3.]) => [1., -4.]);
    test_bop!(f64x2[f64; 2] | f64x2_sub[f64x2_sub_test]:
               ([0., -1.], [1., -3.]) => [-1., 2.]);
    test_bop!(f64x2[f64; 2] | f64x2_mul[f64x2_mul_test]:
               ([0., -1.], [1., -3.]) => [0., 3.]);
    test_bop!(f64x2[f64; 2] | f64x2_div[f64x2_div_test]:
               ([0., -8.], [1., 4.]) => [0., -2.]);
5705
    // Generates a #[test] that bit-casts `$from` to v128, applies the unary
    // conversion intrinsic `$conv_id`, and byte-compares the result against
    // the bit-cast of `$to`.
    // NOTE(review): `$to_ty` is captured but never used in the expansion;
    // invocations must still supply it, so it is kept for compatibility.
    macro_rules! test_conv {
        ($test_id:ident | $conv_id:ident | $to_ty:ident | $from:expr,  $to:expr) => {
            #[test]
            fn $test_id() {
                unsafe {
                    let from: v128 = transmute($from);
                    let to: v128 = transmute($to);

                    let r: v128 = $conv_id(from);

                    compare_bytes(r, to);
                }
            }
        };
    }
5721
    // i32/u32 -> f32 lane conversions; the u32::MAX case checks that the
    // conversion is unsigned (it must not produce a negative float).
    test_conv!(
        f32x4_convert_s_i32x4 | f32x4_convert_i32x4 | f32x4 | [1_i32, 2, 3, 4],
        [1_f32, 2., 3., 4.]
    );
    test_conv!(
        f32x4_convert_u_i32x4 | f32x4_convert_u32x4 | f32x4 | [u32::MAX, 2, 3, 4],
        [u32::MAX as f32, 2., 3., 4.]
    );
5730
5731    #[test]
5732    fn test_conversions() {
5733        compare_bytes(
5734            i32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
5735            i32x4(1, i32::MIN, i32::MAX, 0),
5736        );
5737        compare_bytes(
5738            u32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
5739            u32x4(1, 0, u32::MAX, 0),
5740        );
5741        compare_bytes(f64x2_convert_low_i32x4(i32x4(1, 2, 3, 4)), f64x2(1., 2.));
5742        compare_bytes(
5743            f64x2_convert_low_i32x4(i32x4(i32::MIN, i32::MAX, 3, 4)),
5744            f64x2(f64::from(i32::MIN), f64::from(i32::MAX)),
5745        );
5746        compare_bytes(f64x2_convert_low_u32x4(u32x4(1, 2, 3, 4)), f64x2(1., 2.));
5747        compare_bytes(
5748            f64x2_convert_low_u32x4(u32x4(u32::MIN, u32::MAX, 3, 4)),
5749            f64x2(f64::from(u32::MIN), f64::from(u32::MAX)),
5750        );
5751
5752        compare_bytes(
5753            i32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
5754            i32x4(1, i32::MIN, 0, 0),
5755        );
5756        compare_bytes(
5757            i32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
5758            i32x4(0, i32::MAX, 0, 0),
5759        );
5760        compare_bytes(
5761            u32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
5762            u32x4(1, 0, 0, 0),
5763        );
5764        compare_bytes(
5765            u32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
5766            u32x4(0, u32::MAX, 0, 0),
5767        );
5768    }
5769
5770    #[test]
5771    fn test_popcnt() {
5772        unsafe {
5773            for i in 0..=255 {
5774                compare_bytes(
5775                    i8x16_popcnt(u8x16_splat(i)),
5776                    u8x16_splat(i.count_ones() as u8),
5777                )
5778            }
5779
5780            let vectors = [
5781                [0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
5782                [
5783                    100, 200, 50, 0, 10, 7, 38, 185, 192, 3, 34, 85, 93, 7, 31, 99,
5784                ],
5785            ];
5786
5787            for vector in vectors.iter() {
5788                compare_bytes(
5789                    i8x16_popcnt(transmute(*vector)),
5790                    i8x16(
5791                        vector[0].count_ones() as i8,
5792                        vector[1].count_ones() as i8,
5793                        vector[2].count_ones() as i8,
5794                        vector[3].count_ones() as i8,
5795                        vector[4].count_ones() as i8,
5796                        vector[5].count_ones() as i8,
5797                        vector[6].count_ones() as i8,
5798                        vector[7].count_ones() as i8,
5799                        vector[8].count_ones() as i8,
5800                        vector[9].count_ones() as i8,
5801                        vector[10].count_ones() as i8,
5802                        vector[11].count_ones() as i8,
5803                        vector[12].count_ones() as i8,
5804                        vector[13].count_ones() as i8,
5805                        vector[14].count_ones() as i8,
5806                        vector[15].count_ones() as i8,
5807                    ),
5808                )
5809            }
5810        }
5811    }
5812
5813    #[test]
5814    fn test_promote_demote() {
5815        let tests = [
5816            [1., 2.],
5817            [f64::NAN, f64::INFINITY],
5818            [100., 201.],
5819            [0., -0.],
5820            [f64::NEG_INFINITY, 0.],
5821        ];
5822
5823        for [a, b] in tests {
5824            compare_bytes(
5825                f32x4_demote_f64x2_zero(f64x2(a, b)),
5826                f32x4(a as f32, b as f32, 0., 0.),
5827            );
5828            compare_bytes(
5829                f64x2_promote_low_f32x4(f32x4(a as f32, b as f32, 0., 0.)),
5830                f64x2(a, b),
5831            );
5832        }
5833    }
5834
    #[test]
    fn test_extmul() {
        // Checks the widening-multiply intrinsics: `low` multiplies the low
        // half of the lanes after sign/zero-extending `$from` to `$to`, and
        // `high` does the same for the high half. Each pair of input arrays is
        // checked lane-by-lane against a scalar `as $to` + `wrapping_mul`
        // reference.
        macro_rules! test {
            ($(
                $ctor:ident {
                    from: $from:ident,
                    to: $to:ident,
                    low: $low:ident,
                    high: $high:ident,
                } => {
                    $(([$($a:tt)*] * [$($b:tt)*]))*
                }
            )*) => ($(
                $(unsafe {
                    let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                    let b: [$from; 16 / mem::size_of::<$from>()] = [$($b)*];
                    let low = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($low($ctor($($a)*), $ctor($($b)*)));
                    let high = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($high($ctor($($a)*), $ctor($($b)*)));

                    let half = a.len() / 2;
                    for i in 0..half {
                        assert_eq!(
                            (a[i] as $to).wrapping_mul((b[i] as $to)),
                            low[i],
                            "expected {} * {}", a[i] as $to, b[i] as $to,
                        );
                        assert_eq!(
                            (a[half + i] as $to).wrapping_mul((b[half + i] as $to)),
                            high[i],
                            "expected {} * {}", a[half + i] as $to, b[half + i] as $to,
                        );
                    }
                })*
            )*)
        }
        // One group per extmul family; each group lists (a * b) input pairs.
        test! {
            i8x16 {
                from: i8,
                to: i16,
                low: i16x8_extmul_low_i8x16,
                high: i16x8_extmul_high_i8x16,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                        *
                    [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
                )
            }
            u8x16 {
                from: u8,
                to: u16,
                low: u16x8_extmul_low_u8x16,
                high: u16x8_extmul_high_u8x16,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                        *
                    [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
                )
            }
            i16x8 {
                from: i16,
                to: i32,
                low: i32x4_extmul_low_i16x8,
                high: i32x4_extmul_high_i16x8,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                        *
                    [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
                )
            }
            u16x8 {
                from: u16,
                to: u32,
                low: u32x4_extmul_low_u16x8,
                high: u32x4_extmul_high_u16x8,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                        *
                    [1, 1, 3, 29391, 105, 2, 100, 2]
                )
            }
            i32x4 {
                from: i32,
                to: i64,
                low: i64x2_extmul_low_i32x4,
                high: i64x2_extmul_high_i32x4,
            } => {
                (
                    [0, 0, 0, 0]
                        *
                    [0, 0, 0, 0]
                )
                (
                    [-1, 0, i32::MAX, 19931]
                        *
                    [1, 1, i32::MIN, 29391]
                )
                (
                    [i32::MAX, 3003183, 3 << 20, 0xffffff]
                        *
                    [i32::MAX, i32::MIN, -40042, 300]
                )
            }
            u32x4 {
                from: u32,
                to: u64,
                low: u64x2_extmul_low_u32x4,
                high: u64x2_extmul_high_u32x4,
            } => {
                (
                    [0, 0, 0, 0]
                        *
                    [0, 0, 0, 0]
                )
                (
                    [1, 0, u32::MAX, 19931]
                        *
                    [1, 1, 3, 29391]
                )
                (
                    [u32::MAX, 3003183, 3 << 20, 0xffffff]
                        *
                    [u32::MAX, 3000, 40042, 300]
                )
            }
        }
    }
5985
5986    #[test]
5987    fn test_q15mulr_sat_s() {
5988        fn test(a: [i16; 8], b: [i16; 8]) {
5989            let a_v = i16x8(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]);
5990            let b_v = i16x8(b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
5991            let result = i16x8_q15mulr_sat(a_v, b_v);
5992            let result = unsafe { mem::transmute::<v128, [i16; 8]>(result) };
5993
5994            for (i, (a, b)) in a.iter().zip(&b).enumerate() {
5995                assert_eq!(
5996                    result[i],
5997                    (((*a as i32) * (*b as i32) + 0x4000) >> 15) as i16
5998                );
5999            }
6000        }
6001
6002        test([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]);
6003        test([1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]);
6004        test(
6005            [-1, 100, 2003, -29494, 12, 128, 994, 1],
6006            [-4049, 8494, -10483, 0, 5, 2222, 883, -9],
6007        );
6008    }
6009
    #[test]
    fn test_extadd() {
        // Checks the pairwise widening-add intrinsics: each output lane i is
        // input lane 2i plus lane 2i+1, sign/zero-extended from `$from` to
        // `$to`, verified against a scalar `as $to` + `wrapping_add`
        // reference.
        macro_rules! test {
            ($(
                $func:ident {
                    from: $from:ident,
                    to: $to:ident,
                } => {
                    $([$($a:tt)*])*
                }
            )*) => ($(
                $(unsafe {
                    let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                    let a_v = mem::transmute::<_, v128>(a);
                    let r = mem::transmute::<v128, [$to; 16 / mem::size_of::<$to>()]>($func(a_v));

                    let half = a.len() / 2;
                    for i in 0..half {
                        assert_eq!(
                            (a[2 * i] as $to).wrapping_add((a[2 * i + 1] as $to)),
                            r[i],
                            "failed {} + {} != {}",
                            a[2 * i] as $to,
                            a[2 * i + 1] as $to,
                            r[i],
                        );
                    }
                })*
            )*)
        }
        // One group per extadd_pairwise intrinsic; each bracket is one input.
        test! {
            i16x8_extadd_pairwise_i8x16 {
                from: i8,
                to: i16,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
            }
            i16x8_extadd_pairwise_u8x16 {
                from: u8,
                to: i16,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
            }
            i32x4_extadd_pairwise_i16x8 {
                from: i16,
                to: i32,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0]
                [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
            }
            i32x4_extadd_pairwise_u16x8 {
                from: u16,
                to: i32,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0]
                [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                [1, 1, 3, 29391, 105, 2, 100, 2]
            }
        }
    }
6075}