diff --git a/xml/System.Runtime.Intrinsics.Arm/AdvSimd+Arm64.xml b/xml/System.Runtime.Intrinsics.Arm/AdvSimd+Arm64.xml
index 3f97d499462..a484f4eca7a 100644
--- a/xml/System.Runtime.Intrinsics.Arm/AdvSimd+Arm64.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/AdvSimd+Arm64.xml
@@ -5448,7 +5448,10 @@
To be added.
- int8_t vqmovnh_s16 (int16_t a) A64: SQXTN Bd, Hn
+
+ int8_t vqmovnh_s16 (int16_t a)
+ A64: SQXTN Bd, Hn
+
To be added.
To be added.
@@ -5476,7 +5479,10 @@
To be added.
- int16_t vqmovns_s32 (int32_t a) A64: SQXTN Hd, Sn
+
+ int16_t vqmovns_s32 (int32_t a)
+ A64: SQXTN Hd, Sn
+
To be added.
To be added.
@@ -5504,7 +5510,10 @@
To be added.
- int32_t vqmovnd_s64 (int64_t a) A64: SQXTN Sd, Dn
+
+ int32_t vqmovnd_s64 (int64_t a)
+ A64: SQXTN Sd, Dn
+
To be added.
To be added.
@@ -5532,7 +5541,10 @@
To be added.
- uint8_t vqmovnh_u16 (uint16_t a) A64: UQXTN Bd, Hn
+
+ uint8_t vqmovnh_u16 (uint16_t a)
+ A64: UQXTN Bd, Hn
+
To be added.
To be added.
@@ -5560,7 +5572,10 @@
To be added.
- uint16_t vqmovns_u32 (uint32_t a) A64: UQXTN Hd, Sn
+
+ uint16_t vqmovns_u32 (uint32_t a)
+ A64: UQXTN Hd, Sn
+
To be added.
To be added.
@@ -5588,7 +5603,10 @@
To be added.
- uint32_t vqmovnd_u64 (uint64_t a) A64: UQXTN Sd, Dn
+
+ uint32_t vqmovnd_u64 (uint64_t a)
+ A64: UQXTN Sd, Dn
+
To be added.
To be added.
@@ -5616,7 +5634,10 @@
To be added.
- uint8_t vqmovunh_s16 (int16_t a) A64: SQXTUN Bd, Hn
+
+ uint8_t vqmovunh_s16 (int16_t a)
+ A64: SQXTUN Bd, Hn
+
To be added.
To be added.
@@ -5644,7 +5665,10 @@
To be added.
- uint16_t vqmovuns_s32 (int32_t a) A64: SQXTUN Hd, Sn
+
+ uint16_t vqmovuns_s32 (int32_t a)
+ A64: SQXTUN Hd, Sn
+
To be added.
To be added.
@@ -5672,7 +5696,10 @@
To be added.
- uint32_t vqmovund_s64 (int64_t a) A64: SQXTUN Sd, Dn
+
+ uint32_t vqmovund_s64 (int64_t a)
+ A64: SQXTUN Sd, Dn
+
To be added.
To be added.
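
(For context on the SQXTN/SQXTUN scalar hunks above: a minimal C# sketch, assuming .NET 5+ on an Arm64 host and that vqmovnh_s16 surfaces as AdvSimd.Arm64.ExtractNarrowingSaturateScalar — the C# member names are not visible in this extract, so treat that mapping as an assumption.)

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

class SqxtnScalarSketch
{
    static void Main()
    {
        if (!AdvSimd.Arm64.IsSupported) return;
        // SQXTN Bd, Hn: narrow one Int16 to SByte, saturating out-of-range values.
        Vector64<short> v = Vector64.CreateScalar((short)1000);
        Vector64<sbyte> r = AdvSimd.Arm64.ExtractNarrowingSaturateScalar(v);
        Console.WriteLine(r.GetElement(0)); // 127: 1000 saturates to SByte.MaxValue
    }
}
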
@@ -8413,7 +8440,9 @@
To be added.
- A64: LDP St1, St2, [Xn]
+
+ A64: LDP St1, St2, [Xn]
+
To be added.
To be added.
@@ -8444,7 +8473,9 @@
To be added.
- A64: LDP St1, St2, [Xn]
+
+ A64: LDP St1, St2, [Xn]
+
To be added.
To be added.
@@ -8475,7 +8506,9 @@
To be added.
- A64: LDP St1, St2, [Xn]
+
+ A64: LDP St1, St2, [Xn]
+
To be added.
To be added.
@@ -8506,7 +8539,9 @@
To be added.
- A64: LDNP St1, St2, [Xn]
+
+ A64: LDNP St1, St2, [Xn]
+
To be added.
To be added.
@@ -8537,7 +8572,9 @@
To be added.
- A64: LDNP St1, St2, [Xn]
+
+ A64: LDNP St1, St2, [Xn]
+
To be added.
To be added.
@@ -8568,7 +8605,9 @@
To be added.
- A64: LDNP St1, St2, [Xn]
+
+ A64: LDNP St1, St2, [Xn]
+
To be added.
To be added.
@@ -8599,7 +8638,9 @@
To be added.
- A64: LDP Qt1, Qt2, [Xn]
+
+ A64: LDP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8630,7 +8671,9 @@
To be added.
- A64: LDP Qt1, Qt2, [Xn]
+
+ A64: LDP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8661,7 +8704,9 @@
To be added.
- A64: LDP Qt1, Qt2, [Xn]
+
+ A64: LDP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8692,7 +8737,9 @@
To be added.
- A64: LDP Qt1, Qt2, [Xn]
+
+ A64: LDP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8723,7 +8770,9 @@
To be added.
- A64: LDP Qt1, Qt2, [Xn]
+
+ A64: LDP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8754,7 +8803,9 @@
To be added.
- A64: LDP Qt1, Qt2, [Xn]
+
+ A64: LDP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8785,7 +8836,9 @@
To be added.
- A64: LDP Qt1, Qt2, [Xn]
+
+ A64: LDP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8816,7 +8869,9 @@
To be added.
- A64: LDP Qt1, Qt2, [Xn]
+
+ A64: LDP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8847,7 +8902,9 @@
To be added.
- A64: LDP Qt1, Qt2, [Xn]
+
+ A64: LDP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8878,7 +8935,9 @@
To be added.
- A64: LDP Qt1, Qt2, [Xn]
+
+ A64: LDP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8909,7 +8968,9 @@
To be added.
- A64: LDNP Qt1, Qt2, [Xn]
+
+ A64: LDNP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8940,7 +9001,9 @@
To be added.
- A64: LDNP Qt1, Qt2, [Xn]
+
+ A64: LDNP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -8971,7 +9034,9 @@
To be added.
- A64: LDNP Qt1, Qt2, [Xn]
+
+ A64: LDNP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -9002,7 +9067,9 @@
To be added.
- A64: LDNP Qt1, Qt2, [Xn]
+
+ A64: LDNP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -9033,7 +9100,9 @@
To be added.
- A64: LDNP Qt1, Qt2, [Xn]
+
+ A64: LDNP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -9064,7 +9133,9 @@
To be added.
- A64: LDNP Qt1, Qt2, [Xn]
+
+ A64: LDNP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -9095,7 +9166,9 @@
To be added.
- A64: LDNP Qt1, Qt2, [Xn]
+
+ A64: LDNP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -9126,7 +9199,9 @@
To be added.
- A64: LDNP Qt1, Qt2, [Xn]
+
+ A64: LDNP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -9157,7 +9232,9 @@
To be added.
- A64: LDNP Qt1, Qt2, [Xn]
+
+ A64: LDNP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -9188,7 +9265,9 @@
To be added.
- A64: LDNP Qt1, Qt2, [Xn]
+
+ A64: LDNP Qt1, Qt2, [Xn]
+
To be added.
To be added.
@@ -9219,7 +9298,9 @@
To be added.
- A64: LDP Dt1, Dt2, [Xn]
+
+ A64: LDP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9250,7 +9331,9 @@
To be added.
- A64: LDP Dt1, Dt2, [Xn]
+
+ A64: LDP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9281,7 +9364,9 @@
To be added.
- A64: LDP Dt1, Dt2, [Xn]
+
+ A64: LDP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9312,7 +9397,9 @@
To be added.
- A64: LDP Dt1, Dt2, [Xn]
+
+ A64: LDP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9343,7 +9430,9 @@
To be added.
- A64: LDP Dt1, Dt2, [Xn]
+
+ A64: LDP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9374,7 +9463,9 @@
To be added.
- A64: LDP Dt1, Dt2, [Xn]
+
+ A64: LDP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9405,7 +9496,9 @@
To be added.
- A64: LDP Dt1, Dt2, [Xn]
+
+ A64: LDP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9436,7 +9529,9 @@
To be added.
- A64: LDP Dt1, Dt2, [Xn]
+
+ A64: LDP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9467,7 +9562,9 @@
To be added.
- A64: LDP Dt1, Dt2, [Xn]
+
+ A64: LDP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9498,7 +9595,9 @@
To be added.
- A64: LDP Dt1, Dt2, [Xn]
+
+ A64: LDP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9529,7 +9628,9 @@
To be added.
- A64: LDNP Dt1, Dt2, [Xn]
+
+ A64: LDNP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9560,7 +9661,9 @@
To be added.
- A64: LDNP Dt1, Dt2, [Xn]
+
+ A64: LDNP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9591,7 +9694,9 @@
To be added.
- A64: LDNP Dt1, Dt2, [Xn]
+
+ A64: LDNP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9622,7 +9727,9 @@
To be added.
- A64: LDNP Dt1, Dt2, [Xn]
+
+ A64: LDNP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9653,7 +9760,9 @@
To be added.
- A64: LDNP Dt1, Dt2, [Xn]
+
+ A64: LDNP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9684,7 +9793,9 @@
To be added.
- A64: LDNP Dt1, Dt2, [Xn]
+
+ A64: LDNP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9715,7 +9826,9 @@
To be added.
- A64: LDNP Dt1, Dt2, [Xn]
+
+ A64: LDNP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9746,7 +9859,9 @@
To be added.
- A64: LDNP Dt1, Dt2, [Xn]
+
+ A64: LDNP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9777,7 +9892,9 @@
To be added.
- A64: LDNP Dt1, Dt2, [Xn]
+
+ A64: LDNP Dt1, Dt2, [Xn]
+
To be added.
To be added.
@@ -9808,7 +9925,9 @@
To be added.
- A64: LDNP Dt1, Dt2, [Xn]
+
+ A64: LDNP Dt1, Dt2, [Xn]
+
To be added.
To be added.
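
(The LDP/LDNP hunks above cover the pairwise loads; a minimal sketch, assuming .NET 7+ on Arm64 and that the LDP docs belong to AdvSimd.Arm64.LoadPairVector128 — an assumption, since the extract shows only the instruction lines.)

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

unsafe class LdpSketch
{
    static void Main()
    {
        if (!AdvSimd.Arm64.IsSupported) return;
        int* buf = stackalloc int[8];
        for (int i = 0; i < 8; i++) buf[i] = i;
        // LDP Qt1, Qt2, [Xn]: one instruction fills two 128-bit registers
        // from 32 consecutive bytes.
        (Vector128<int> lo, Vector128<int> hi) = AdvSimd.Arm64.LoadPairVector128(buf);
        Console.WriteLine($"{lo} {hi}"); // <0, 1, 2, 3> <4, 5, 6, 7>
    }
}
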
@@ -11928,7 +12047,10 @@
To be added.
To be added.
- int16_t vqdmulhh_s16 (int16_t a, int16_t b) A64: SQDMULH Hd, Hn, Hm
+
+ int16_t vqdmulhh_s16 (int16_t a, int16_t b)
+ A64: SQDMULH Hd, Hn, Hm
+
To be added.
To be added.
@@ -11958,7 +12080,10 @@
To be added.
To be added.
- int32_t vqdmulhs_s32 (int32_t a, int32_t b) A64: SQDMULH Sd, Sn, Sm
+
+ int32_t vqdmulhs_s32 (int32_t a, int32_t b)
+ A64: SQDMULH Sd, Sn, Sm
+
To be added.
To be added.
@@ -11997,7 +12122,10 @@
To be added.
To be added.
To be added.
- int16_t vqdmulhh_laneq_s16 (int16_t a, int16x8_t v, const int lane) A64: SQDMULH Hd, Hn, Vm.H[lane]
+
+ int16_t vqdmulhh_laneq_s16 (int16_t a, int16x8_t v, const int lane)
+ A64: SQDMULH Hd, Hn, Vm.H[lane]
+
To be added.
To be added.
@@ -12036,7 +12164,10 @@
To be added.
To be added.
To be added.
- int16_t vqdmulhh_lane_s16 (int16_t a, int16x4_t v, const int lane) A64: SQDMULH Hd, Hn, Vm.H[lane]
+
+ int16_t vqdmulhh_lane_s16 (int16_t a, int16x4_t v, const int lane)
+ A64: SQDMULH Hd, Hn, Vm.H[lane]
+
To be added.
To be added.
@@ -12075,7 +12206,10 @@
To be added.
To be added.
To be added.
- int32_t vqdmulhs_laneq_s32 (int32_t a, int32x4_t v, const int lane) A64: SQDMULH Sd, Sn, Vm.S[lane]
+
+ int32_t vqdmulhs_laneq_s32 (int32_t a, int32x4_t v, const int lane)
+ A64: SQDMULH Sd, Sn, Vm.S[lane]
+
To be added.
To be added.
@@ -12114,7 +12248,10 @@
To be added.
To be added.
To be added.
- int32_t vqdmulhs_lane_s32 (int32_t a, int32x2_t v, const int lane) A64: SQDMULH Sd, Sn, Vm.S[lane]
+
+ int32_t vqdmulhs_lane_s32 (int32_t a, int32x2_t v, const int lane)
+ A64: SQDMULH Sd, Sn, Vm.S[lane]
+
To be added.
To be added.
@@ -12146,7 +12283,10 @@
To be added.
To be added.
To be added.
- int32_t vqdmlalh_s16 (int32_t a, int16_t b, int16_t c) A64: SQDMLAL Sd, Hn, Hm
+
+ int32_t vqdmlalh_s16 (int32_t a, int16_t b, int16_t c)
+ A64: SQDMLAL Sd, Hn, Hm
+
To be added.
To be added.
@@ -12178,7 +12318,10 @@
To be added.
To be added.
To be added.
- int64_t vqdmlals_s32 (int64_t a, int32_t b, int32_t c) A64: SQDMLAL Dd, Sn, Sm
+
+ int64_t vqdmlals_s32 (int64_t a, int32_t b, int32_t c)
+ A64: SQDMLAL Dd, Sn, Sm
+
To be added.
To be added.
@@ -12210,7 +12353,10 @@
To be added.
To be added.
To be added.
- int32_t vqdmlslh_s16 (int32_t a, int16_t b, int16_t c) A64: SQDMLSL Sd, Hn, Hm
+
+ int32_t vqdmlslh_s16 (int32_t a, int16_t b, int16_t c)
+ A64: SQDMLSL Sd, Hn, Hm
+
To be added.
To be added.
@@ -12242,7 +12388,10 @@
To be added.
To be added.
To be added.
- int64_t vqdmlsls_s32 (int64_t a, int32_t b, int32_t c) A64: SQDMLSL Dd, Sn, Sm
+
+ int64_t vqdmlsls_s32 (int64_t a, int32_t b, int32_t c)
+ A64: SQDMLSL Dd, Sn, Sm
+
To be added.
To be added.
@@ -12272,7 +12421,10 @@
To be added.
To be added.
- int32_t vqdmullh_s16 (int16_t a, int16_t b) A64: SQDMULL Sd, Hn, Hm
+
+ int32_t vqdmullh_s16 (int16_t a, int16_t b)
+ A64: SQDMULL Sd, Hn, Hm
+
To be added.
To be added.
@@ -12302,7 +12454,10 @@
To be added.
To be added.
- int64_t vqdmulls_s32 (int32_t a, int32_t b) A64: SQDMULL Dd, Sn, Sm
+
+ int64_t vqdmulls_s32 (int32_t a, int32_t b)
+ A64: SQDMULL Dd, Sn, Sm
+
To be added.
To be added.
@@ -12341,7 +12496,10 @@
To be added.
To be added.
To be added.
- int32_t vqdmullh_laneq_s16 (int16_t a, int16x8_t v, const int lane) A64: SQDMULL Sd, Hn, Vm.H[lane]
+
+ int32_t vqdmullh_laneq_s16 (int16_t a, int16x8_t v, const int lane)
+ A64: SQDMULL Sd, Hn, Vm.H[lane]
+
To be added.
To be added.
@@ -12380,7 +12538,10 @@
To be added.
To be added.
To be added.
- int32_t vqdmullh_lane_s16 (int16_t a, int16x4_t v, const int lane) A64: SQDMULL Sd, Hn, Vm.H[lane]
+
+ int32_t vqdmullh_lane_s16 (int16_t a, int16x4_t v, const int lane)
+ A64: SQDMULL Sd, Hn, Vm.H[lane]
+
To be added.
To be added.
@@ -12419,7 +12580,10 @@
To be added.
To be added.
To be added.
- int64_t vqdmulls_laneq_s32 (int32_t a, int32x4_t v, const int lane) A64: SQDMULL Dd, Sn, Vm.S[lane]
+
+ int64_t vqdmulls_laneq_s32 (int32_t a, int32x4_t v, const int lane)
+ A64: SQDMULL Dd, Sn, Vm.S[lane]
+
To be added.
To be added.
@@ -12458,7 +12622,10 @@
To be added.
To be added.
To be added.
- int64_t vqdmulls_lane_s32 (int32_t a, int32x2_t v, const int lane) A64: SQDMULL Dd, Sn, Vm.S[lane]
+
+ int64_t vqdmulls_lane_s32 (int32_t a, int32x2_t v, const int lane)
+ A64: SQDMULL Dd, Sn, Vm.S[lane]
+
To be added.
To be added.
@@ -12499,7 +12666,10 @@
To be added.
To be added.
To be added.
- int32_t vqdmlalh_laneq_s16 (int32_t a, int16_t b, int16x8_t v, const int lane) A64: SQDMLAL Sd, Hn, Vm.H[lane]
+
+ int32_t vqdmlalh_laneq_s16 (int32_t a, int16_t b, int16x8_t v, const int lane)
+ A64: SQDMLAL Sd, Hn, Vm.H[lane]
+
To be added.
To be added.
@@ -12540,7 +12710,10 @@
To be added.
To be added.
To be added.
- int32_t vqdmlalh_lane_s16 (int32_t a, int16_t b, int16x4_t v, const int lane) A64: SQDMLAL Sd, Hn, Vm.H[lane]
+
+ int32_t vqdmlalh_lane_s16 (int32_t a, int16_t b, int16x4_t v, const int lane)
+ A64: SQDMLAL Sd, Hn, Vm.H[lane]
+
To be added.
To be added.
@@ -12581,7 +12754,10 @@
To be added.
To be added.
To be added.
- int64_t vqdmlals_laneq_s32 (int64_t a, int32_t b, int32x4_t v, const int lane) A64: SQDMLAL Dd, Sn, Vm.S[lane]
+
+ int64_t vqdmlals_laneq_s32 (int64_t a, int32_t b, int32x4_t v, const int lane)
+ A64: SQDMLAL Dd, Sn, Vm.S[lane]
+
To be added.
To be added.
@@ -12622,7 +12798,10 @@
To be added.
To be added.
To be added.
- int64_t vqdmlals_lane_s32 (int64_t a, int32_t b, int32x2_t v, const int lane) A64: SQDMLAL Dd, Sn, Vm.S[lane]
+
+ int64_t vqdmlals_lane_s32 (int64_t a, int32_t b, int32x2_t v, const int lane)
+ A64: SQDMLAL Dd, Sn, Vm.S[lane]
+
To be added.
To be added.
@@ -12663,7 +12842,10 @@
To be added.
To be added.
To be added.
- int32_t vqdmlslh_laneq_s16 (int32_t a, int16_t b, int16x8_t v, const int lane) A64: SQDMLSL Sd, Hn, Vm.H[lane]
+
+ int32_t vqdmlslh_laneq_s16 (int32_t a, int16_t b, int16x8_t v, const int lane)
+ A64: SQDMLSL Sd, Hn, Vm.H[lane]
+
To be added.
To be added.
@@ -12704,7 +12886,10 @@
To be added.
To be added.
To be added.
- int32_t vqdmlslh_lane_s16 (int32_t a, int16_t b, int16x4_t v, const int lane) A64: SQDMLSL Sd, Hn, Vm.H[lane]
+
+ int32_t vqdmlslh_lane_s16 (int32_t a, int16_t b, int16x4_t v, const int lane)
+ A64: SQDMLSL Sd, Hn, Vm.H[lane]
+
To be added.
To be added.
@@ -12745,7 +12930,10 @@
To be added.
To be added.
To be added.
- int64_t vqdmlsls_laneq_s32 (int64_t a, int32_t b, int32x4_t v, const int lane) A64: SQDMLSL Dd, Sn, Vm.S[lane]
+
+ int64_t vqdmlsls_laneq_s32 (int64_t a, int32_t b, int32x4_t v, const int lane)
+ A64: SQDMLSL Dd, Sn, Vm.S[lane]
+
To be added.
To be added.
@@ -12786,7 +12974,10 @@
To be added.
To be added.
To be added.
- int64_t vqdmlsls_lane_s32 (int64_t a, int32_t b, int32x2_t v, const int lane) A64: SQDMLSL Dd, Sn, Vm.S[lane]
+
+ int64_t vqdmlsls_lane_s32 (int64_t a, int32_t b, int32x2_t v, const int lane)
+ A64: SQDMLSL Dd, Sn, Vm.S[lane]
+
To be added.
To be added.
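
(For the SQDMULH/SQDMLAL/SQDMLSL scalar hunks above, a minimal sketch of the doubling-saturating-high multiply, assuming .NET 5+ on Arm64 and the name AdvSimd.Arm64.MultiplyDoublingSaturateHighScalar for vqdmulhh_s16 — an assumption, as the C# member names are outside this extract.)

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

class SqdmulhScalarSketch
{
    static void Main()
    {
        if (!AdvSimd.Arm64.IsSupported) return;
        // SQDMULH Hd, Hn, Hm: (2 * a * b) >> 16, saturated to the Int16 range.
        Vector64<short> a = Vector64.Create((short)30000, 0, 0, 0);
        Vector64<short> b = Vector64.Create((short)30000, 0, 0, 0);
        Vector64<short> r = AdvSimd.Arm64.MultiplyDoublingSaturateHighScalar(a, b);
        Console.WriteLine(r.GetElement(0)); // 27465 == (2 * 30000 * 30000) >> 16
    }
}
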
@@ -13350,7 +13541,10 @@
To be added.
To be added.
- int16_t vqrdmulhh_s16 (int16_t a, int16_t b) A64: SQRDMULH Hd, Hn, Hm
+
+ int16_t vqrdmulhh_s16 (int16_t a, int16_t b)
+ A64: SQRDMULH Hd, Hn, Hm
+
To be added.
To be added.
@@ -13380,7 +13574,10 @@
To be added.
To be added.
- int32_t vqrdmulhs_s32 (int32_t a, int32_t b) A64: SQRDMULH Sd, Sn, Sm
+
+ int32_t vqrdmulhs_s32 (int32_t a, int32_t b)
+ A64: SQRDMULH Sd, Sn, Sm
+
To be added.
To be added.
@@ -13419,7 +13616,10 @@
To be added.
To be added.
To be added.
- int16_t vqrdmulhh_laneq_s16 (int16_t a, int16x8_t v, const int lane) A64: SQRDMULH Hd, Hn, Vm.H[lane]
+
+ int16_t vqrdmulhh_laneq_s16 (int16_t a, int16x8_t v, const int lane)
+ A64: SQRDMULH Hd, Hn, Vm.H[lane]
+
To be added.
To be added.
@@ -13458,7 +13658,10 @@
To be added.
To be added.
To be added.
- int16_t vqrdmulhh_lane_s16 (int16_t a, int16x4_t v, const int lane) A64: SQRDMULH Hd, Hn, Vm.H[lane]
+
+ int16_t vqrdmulhh_lane_s16 (int16_t a, int16x4_t v, const int lane)
+ A64: SQRDMULH Hd, Hn, Vm.H[lane]
+
To be added.
To be added.
@@ -13497,7 +13700,10 @@
To be added.
To be added.
To be added.
- int32_t vqrdmulhs_laneq_s32 (int32_t a, int32x4_t v, const int lane) A64: SQRDMULH Sd, Sn, Vm.S[lane]
+
+ int32_t vqrdmulhs_laneq_s32 (int32_t a, int32x4_t v, const int lane)
+ A64: SQRDMULH Sd, Sn, Vm.S[lane]
+
To be added.
To be added.
@@ -13536,7 +13742,10 @@
To be added.
To be added.
To be added.
- int32_t vqrdmulhs_lane_s32 (int32_t a, int32x2_t v, const int lane) A64: SQRDMULH Sd, Sn, Vm.S[lane]
+
+ int32_t vqrdmulhs_lane_s32 (int32_t a, int32x2_t v, const int lane)
+ A64: SQRDMULH Sd, Sn, Vm.S[lane]
+
To be added.
To be added.
@@ -16589,7 +16798,9 @@
To be added.
To be added.
To be added.
- A64: STP Qt1, Qt2, [Xn]
+
+ A64: STP Qt1, Qt2, [Xn]
+
To be added.
@@ -16619,7 +16830,9 @@
To be added.
To be added.
To be added.
- A64: STP Dt1, Dt2, [Xn]
+
+ A64: STP Dt1, Dt2, [Xn]
+
To be added.
@@ -16649,7 +16862,9 @@
To be added.
To be added.
To be added.
- A64: STP Qt1, Qt2, [Xn]
+
+ A64: STP Qt1, Qt2, [Xn]
+
To be added.
@@ -16679,7 +16894,9 @@
To be added.
To be added.
To be added.
- A64: STP Dt1, Dt2, [Xn]
+
+ A64: STP Dt1, Dt2, [Xn]
+
To be added.
@@ -16709,7 +16926,9 @@
To be added.
To be added.
To be added.
- A64: STP Qt1, Qt2, [Xn]
+
+ A64: STP Qt1, Qt2, [Xn]
+
To be added.
@@ -16739,7 +16958,9 @@
To be added.
To be added.
To be added.
- A64: STP Dt1, Dt2, [Xn]
+
+ A64: STP Dt1, Dt2, [Xn]
+
To be added.
@@ -16769,7 +16990,9 @@
To be added.
To be added.
To be added.
- A64: STP Qt1, Qt2, [Xn]
+
+ A64: STP Qt1, Qt2, [Xn]
+
To be added.
@@ -16799,7 +17022,9 @@
To be added.
To be added.
To be added.
- A64: STP Dt1, Dt2, [Xn]
+
+ A64: STP Dt1, Dt2, [Xn]
+
To be added.
@@ -16829,7 +17054,9 @@
To be added.
To be added.
To be added.
- A64: STP Qt1, Qt2, [Xn]
+
+ A64: STP Qt1, Qt2, [Xn]
+
To be added.
@@ -16859,7 +17086,9 @@
To be added.
To be added.
To be added.
- A64: STP Dt1, Dt2, [Xn]
+
+ A64: STP Dt1, Dt2, [Xn]
+
To be added.
@@ -16889,7 +17118,9 @@
To be added.
To be added.
To be added.
- A64: STP Qt1, Qt2, [Xn]
+
+ A64: STP Qt1, Qt2, [Xn]
+
To be added.
@@ -16919,7 +17150,9 @@
To be added.
To be added.
To be added.
- A64: STP Dt1, Dt2, [Xn]
+
+ A64: STP Dt1, Dt2, [Xn]
+
To be added.
@@ -16949,7 +17182,9 @@
To be added.
To be added.
To be added.
- A64: STP Qt1, Qt2, [Xn]
+
+ A64: STP Qt1, Qt2, [Xn]
+
To be added.
@@ -16979,7 +17214,9 @@
To be added.
To be added.
To be added.
- A64: STP Dt1, Dt2, [Xn]
+
+ A64: STP Dt1, Dt2, [Xn]
+
To be added.
@@ -17009,7 +17246,9 @@
To be added.
To be added.
To be added.
- A64: STP Qt1, Qt2, [Xn]
+
+ A64: STP Qt1, Qt2, [Xn]
+
To be added.
@@ -17039,7 +17278,9 @@
To be added.
To be added.
To be added.
- A64: STP Dt1, Dt2, [Xn]
+
+ A64: STP Dt1, Dt2, [Xn]
+
To be added.
@@ -17069,7 +17310,9 @@
To be added.
To be added.
To be added.
- A64: STP Qt1, Qt2, [Xn]
+
+ A64: STP Qt1, Qt2, [Xn]
+
To be added.
@@ -17099,7 +17342,9 @@
To be added.
To be added.
To be added.
- A64: STP Dt1, Dt2, [Xn]
+
+ A64: STP Dt1, Dt2, [Xn]
+
To be added.
@@ -17129,7 +17374,9 @@
To be added.
To be added.
To be added.
- A64: STP Qt1, Qt2, [Xn]
+
+ A64: STP Qt1, Qt2, [Xn]
+
To be added.
@@ -17159,7 +17406,9 @@
To be added.
To be added.
To be added.
- A64: STP Dt1, Dt2, [Xn]
+
+ A64: STP Dt1, Dt2, [Xn]
+
To be added.
@@ -17189,7 +17438,9 @@
To be added.
To be added.
To be added.
- A64: STNP Qt1, Qt2, [Xn]
+
+ A64: STNP Qt1, Qt2, [Xn]
+
To be added.
@@ -17219,7 +17470,9 @@
To be added.
To be added.
To be added.
- A64: STNP Dt1, Dt2, [Xn]
+
+ A64: STNP Dt1, Dt2, [Xn]
+
To be added.
@@ -17249,7 +17502,9 @@
To be added.
To be added.
To be added.
- A64: STNP Qt1, Qt2, [Xn]
+
+ A64: STNP Qt1, Qt2, [Xn]
+
To be added.
@@ -17279,7 +17534,9 @@
To be added.
To be added.
To be added.
- A64: STNP Dt1, Dt2, [Xn]
+
+ A64: STNP Dt1, Dt2, [Xn]
+
To be added.
@@ -17309,7 +17566,9 @@
To be added.
To be added.
To be added.
- A64: STNP Qt1, Qt2, [Xn]
+
+ A64: STNP Qt1, Qt2, [Xn]
+
To be added.
@@ -17339,7 +17598,9 @@
To be added.
To be added.
To be added.
- A64: STNP Dt1, Dt2, [Xn]
+
+ A64: STNP Dt1, Dt2, [Xn]
+
To be added.
@@ -17369,7 +17630,9 @@
To be added.
To be added.
To be added.
- A64: STNP Qt1, Qt2, [Xn]
+
+ A64: STNP Qt1, Qt2, [Xn]
+
To be added.
@@ -17399,7 +17662,9 @@
To be added.
To be added.
To be added.
- A64: STNP Dt1, Dt2, [Xn]
+
+ A64: STNP Dt1, Dt2, [Xn]
+
To be added.
@@ -17429,7 +17694,9 @@
To be added.
To be added.
To be added.
- A64: STNP Qt1, Qt2, [Xn]
+
+ A64: STNP Qt1, Qt2, [Xn]
+
To be added.
@@ -17459,7 +17726,9 @@
To be added.
To be added.
To be added.
- A64: STNP Dt1, Dt2, [Xn]
+
+ A64: STNP Dt1, Dt2, [Xn]
+
To be added.
@@ -17489,7 +17758,9 @@
To be added.
To be added.
To be added.
- A64: STNP Qt1, Qt2, [Xn]
+
+ A64: STNP Qt1, Qt2, [Xn]
+
To be added.
@@ -17519,7 +17790,9 @@
To be added.
To be added.
To be added.
- A64: STNP Dt1, Dt2, [Xn]
+
+ A64: STNP Dt1, Dt2, [Xn]
+
To be added.
@@ -17549,7 +17822,9 @@
To be added.
To be added.
To be added.
- A64: STNP Qt1, Qt2, [Xn]
+
+ A64: STNP Qt1, Qt2, [Xn]
+
To be added.
@@ -17579,7 +17854,9 @@
To be added.
To be added.
To be added.
- A64: STNP Dt1, Dt2, [Xn]
+
+ A64: STNP Dt1, Dt2, [Xn]
+
To be added.
@@ -17609,7 +17886,9 @@
To be added.
To be added.
To be added.
- A64: STNP Qt1, Qt2, [Xn]
+
+ A64: STNP Qt1, Qt2, [Xn]
+
To be added.
@@ -17639,7 +17918,9 @@
To be added.
To be added.
To be added.
- A64: STNP Dt1, Dt2, [Xn]
+
+ A64: STNP Dt1, Dt2, [Xn]
+
To be added.
@@ -17669,7 +17950,9 @@
To be added.
To be added.
To be added.
- A64: STNP Qt1, Qt2, [Xn]
+
+ A64: STNP Qt1, Qt2, [Xn]
+
To be added.
@@ -17699,7 +17982,9 @@
To be added.
To be added.
To be added.
- A64: STNP Dt1, Dt2, [Xn]
+
+ A64: STNP Dt1, Dt2, [Xn]
+
To be added.
@@ -17729,7 +18014,9 @@
To be added.
To be added.
To be added.
- A64: STNP Qt1, Qt2, [Xn]
+
+ A64: STNP Qt1, Qt2, [Xn]
+
To be added.
@@ -17759,7 +18046,9 @@
To be added.
To be added.
To be added.
- A64: STNP Dt1, Dt2, [Xn]
+
+ A64: STNP Dt1, Dt2, [Xn]
+
To be added.
@@ -17789,7 +18078,9 @@
To be added.
To be added.
To be added.
- A64: STP St1, St2, [Xn]
+
+ A64: STP St1, St2, [Xn]
+
To be added.
@@ -17819,7 +18110,9 @@
To be added.
To be added.
To be added.
- A64: STP St1, St2, [Xn]
+
+ A64: STP St1, St2, [Xn]
+
To be added.
@@ -17849,7 +18142,9 @@
To be added.
To be added.
To be added.
- A64: STP St1, St2, [Xn]
+
+ A64: STP St1, St2, [Xn]
+
To be added.
@@ -17879,7 +18174,9 @@
To be added.
To be added.
To be added.
- A64: STNP St1, St2, [Xn]
+
+ A64: STNP St1, St2, [Xn]
+
To be added.
@@ -17909,7 +18206,9 @@
To be added.
To be added.
To be added.
- A64: STNP St1, St2, [Xn]
+
+ A64: STNP St1, St2, [Xn]
+
To be added.
@@ -17939,7 +18238,9 @@
To be added.
To be added.
To be added.
- A64: STNP St1, St2, [Xn]
+
+ A64: STNP St1, St2, [Xn]
+
To be added.
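
(Mirroring the STP/STNP hunks above: a minimal sketch, assuming .NET 7+ on Arm64 and that the STP pair stores surface as AdvSimd.Arm64.StorePair — an assumption; STNP would be the non-temporal variant.)

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

unsafe class StpSketch
{
    static void Main()
    {
        if (!AdvSimd.Arm64.IsSupported) return;
        int* buf = stackalloc int[8];
        Vector128<int> lo = Vector128.Create(0, 1, 2, 3);
        Vector128<int> hi = Vector128.Create(4, 5, 6, 7);
        // STP Qt1, Qt2, [Xn]: one instruction stores both vectors to 32 consecutive bytes.
        AdvSimd.Arm64.StorePair(buf, lo, hi);
        Console.WriteLine($"{buf[0]}..{buf[7]}"); // 0..7
    }
}
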
@@ -20506,7 +20807,10 @@
To be added.
To be added.
- uint8x16_t vqtbl4q_u8(uint8x16x4_t t, uint8x16_t idx) A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
+
+ uint8x16_t vqtbl4q_u8(uint8x16x4_t t, uint8x16_t idx)
+ A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
+
To be added.
To be added.
@@ -20533,7 +20837,10 @@
To be added.
To be added.
- uint8x16_t vqtbl3q_u8(uint8x16x3_t t, uint8x16_t idx) A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
+
+ uint8x16_t vqtbl3q_u8(uint8x16x3_t t, uint8x16_t idx)
+ A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
+
To be added.
To be added.
@@ -20560,7 +20867,10 @@
To be added.
To be added.
- uint8x16_t vqtbl2q_u8(uint8x16x2_t t, uint8x16_t idx) A64: TBL Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
+
+ uint8x16_t vqtbl2q_u8(uint8x16x2_t t, uint8x16_t idx)
+ A64: TBL Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
+
To be added.
To be added.
@@ -20587,7 +20897,10 @@
To be added.
To be added.
- int8x16_t vqtbl4q_s8(int8x16x4_t t, uint8x16_t idx) A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
+
+ int8x16_t vqtbl4q_s8(int8x16x4_t t, uint8x16_t idx)
+ A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
+
To be added.
To be added.
@@ -20614,7 +20927,10 @@
To be added.
To be added.
- int8x16_t vqtbl3q_s8(int8x16x3_t t, uint8x16_t idx) A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
+
+ int8x16_t vqtbl3q_s8(int8x16x3_t t, uint8x16_t idx)
+ A64: TBL Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
+
To be added.
To be added.
@@ -20641,7 +20957,10 @@
To be added.
To be added.
- int8x16_t vqtbl2q_s8(int8x16x2_t t, uint8x16_t idx) A64: TBL Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
+
+ int8x16_t vqtbl2q_s8(int8x16x2_t t, uint8x16_t idx)
+ A64: TBL Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
+
To be added.
To be added.
@@ -20705,7 +21024,10 @@
To be added.
To be added.
To be added.
- uint8x16_t vqtbx4q_u8(uint8x16x4_t t, uint8x16_t idx) A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
+
+ uint8x16_t vqtbx4q_u8(uint8x16_t r, uint8x16x4_t t, uint8x16_t idx)
+ A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
+
To be added.
To be added.
@@ -20734,7 +21056,10 @@
To be added.
To be added.
To be added.
- uint8x16_t vqtbx3q_u8(uint8x16x3_t t, uint8x16_t idx) A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
+
+ uint8x16_t vqtbx3q_u8(uint8x16_t r, uint8x16x3_t t, uint8x16_t idx)
+ A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
+
To be added.
To be added.
@@ -20763,7 +21088,10 @@
To be added.
To be added.
To be added.
- uint8x16_t vqtbx2q_u8(uint8x16x2_t t, uint8x16_t idx) A64: TBX Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
+
+ uint8x16_t vqtbx2q_u8(uint8x16_t r, uint8x16x2_t t, uint8x16_t idx)
+ A64: TBX Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
+
To be added.
To be added.
@@ -20827,7 +21155,10 @@
To be added.
To be added.
To be added.
- int8x16_t vqtbx4q_s8(int8x16x4_t t, uint8x16_t idx) A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
+
+ int8x16_t vqtbx4q_s8(int8x16_t r, int8x16x4_t t, uint8x16_t idx)
+ A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.16B
+
To be added.
To be added.
@@ -20856,7 +21187,10 @@
To be added.
To be added.
To be added.
- int8x16_t vqtbx3q_s8(int8x16x3_t t, uint8x16_t idx) A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
+
+ int8x16_t vqtbx3q_s8(int8x16_t r, int8x16x3_t t, uint8x16_t idx)
+ A64: TBX Vd.16B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.16B
+
To be added.
To be added.
@@ -20885,7 +21219,10 @@
To be added.
To be added.
To be added.
- int8x16_t vqtbx2q_s8(int8x16x2_t t, uint8x16_t idx) A64: TBX Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
+
+ int8x16_t vqtbx2q_s8(int8x16_t r, int8x16x2_t t, uint8x16_t idx)
+ A64: TBX Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B
+
To be added.
To be added.
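
(For the TBL/TBX hunks above: a minimal sketch of a two-register table lookup, assuming a .NET version that exposes the tuple overload of AdvSimd.Arm64.VectorTableLookup — treat the overload shape as an assumption, since only the native signatures appear in this extract.)

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

class TblSketch
{
    static void Main()
    {
        if (!AdvSimd.Arm64.IsSupported) return;
        // TBL Vd.16B, {Vn.16B, Vn+1.16B}, Vm.16B: indexes 0..31 select bytes from the
        // two-register table; out-of-range indexes yield 0 (TBX would keep the old byte).
        var table = (Vector128.Create((byte)0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
                     Vector128.Create((byte)16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
        Vector128<byte> idx = Vector128.Create((byte)31, 0, 16, 200, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
        Vector128<byte> r = AdvSimd.Arm64.VectorTableLookup(table, idx);
        Console.WriteLine(r); // <31, 0, 16, 0, 1, 1, ...>
    }
}
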
diff --git a/xml/System.Runtime.Intrinsics.Arm/AdvSimd.xml b/xml/System.Runtime.Intrinsics.Arm/AdvSimd.xml
index 8d252ef4dac..9343c35a1db 100644
--- a/xml/System.Runtime.Intrinsics.Arm/AdvSimd.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/AdvSimd.xml
@@ -15261,7 +15261,11 @@
To be added.
- int8x8_t vqmovn_s16 (int16x8_t a) A32: VQMOVN.S16 Dd, Qm A64: SQXTN Vd.8B, Vn.8H
+
+ int8x8_t vqmovn_s16 (int16x8_t a)
+ A32: VQMOVN.S16 Dd, Qm
+ A64: SQXTN Vd.8B, Vn.8H
+
To be added.
To be added.
@@ -15289,7 +15293,11 @@
To be added.
- int16x4_t vqmovn_s32 (int32x4_t a) A32: VQMOVN.S32 Dd, Qm A64: SQXTN Vd.4H, Vn.4S
+
+ int16x4_t vqmovn_s32 (int32x4_t a)
+ A32: VQMOVN.S32 Dd, Qm
+ A64: SQXTN Vd.4H, Vn.4S
+
To be added.
To be added.
@@ -15317,7 +15325,11 @@
To be added.
- int32x2_t vqmovn_s64 (int64x2_t a) A32: VQMOVN.S64 Dd, Qm A64: SQXTN Vd.2S, Vn.2D
+
+ int32x2_t vqmovn_s64 (int64x2_t a)
+ A32: VQMOVN.S64 Dd, Qm
+ A64: SQXTN Vd.2S, Vn.2D
+
To be added.
To be added.
@@ -15345,7 +15357,11 @@
To be added.
- uint8x8_t vqmovn_u16 (uint16x8_t a) A32: VQMOVN.U16 Dd, Qm A64: UQXTN Vd.8B, Vn.8H
+
+ uint8x8_t vqmovn_u16 (uint16x8_t a)
+ A32: VQMOVN.U16 Dd, Qm
+ A64: UQXTN Vd.8B, Vn.8H
+
To be added.
To be added.
@@ -15373,7 +15389,11 @@
To be added.
- uint16x4_t vqmovn_u32 (uint32x4_t a) A32: VQMOVN.U32 Dd, Qm A64: UQXTN Vd.4H, Vn.4S
+
+ uint16x4_t vqmovn_u32 (uint32x4_t a)
+ A32: VQMOVN.U32 Dd, Qm
+ A64: UQXTN Vd.4H, Vn.4S
+
To be added.
To be added.
@@ -15401,7 +15421,11 @@
To be added.
- uint32x2_t vqmovn_u64 (uint64x2_t a) A32: VQMOVN.U64 Dd, Qm A64: UQXTN Vd.2S, Vn.2D
+
+ uint32x2_t vqmovn_u64 (uint64x2_t a)
+ A32: VQMOVN.U64 Dd, Qm
+ A64: UQXTN Vd.2S, Vn.2D
+
To be added.
To be added.
@@ -15429,7 +15453,11 @@
To be added.
- uint8x8_t vqmovun_s16 (int16x8_t a) A32: VQMOVUN.S16 Dd, Qm A64: SQXTUN Vd.8B, Vn.8H
+
+ uint8x8_t vqmovun_s16 (int16x8_t a)
+ A32: VQMOVUN.S16 Dd, Qm
+ A64: SQXTUN Vd.8B, Vn.8H
+
To be added.
To be added.
@@ -15457,7 +15485,11 @@
To be added.
- uint16x4_t vqmovun_s32 (int32x4_t a) A32: VQMOVUN.S32 Dd, Qm A64: SQXTUN Vd.4H, Vn.4S
+
+ uint16x4_t vqmovun_s32 (int32x4_t a)
+ A32: VQMOVUN.S32 Dd, Qm
+ A64: SQXTUN Vd.4H, Vn.4S
+
To be added.
To be added.
@@ -15485,7 +15517,11 @@
To be added.
- uint32x2_t vqmovun_s64 (int64x2_t a) A32: VQMOVUN.S64 Dd, Qm A64: SQXTUN Vd.2S, Vn.2D
+
+ uint32x2_t vqmovun_s64 (int64x2_t a)
+ A32: VQMOVUN.S64 Dd, Qm
+ A64: SQXTUN Vd.2S, Vn.2D
+
To be added.
To be added.
@@ -15515,7 +15551,11 @@
To be added.
To be added.
- uint8x16_t vqmovun_high_s16 (uint8x8_t r, int16x8_t a) A32: VQMOVUN.S16 Dd+1, Qm A64: SQXTUN2 Vd.16B, Vn.8H
+
+ uint8x16_t vqmovun_high_s16 (uint8x8_t r, int16x8_t a)
+ A32: VQMOVUN.S16 Dd+1, Qm
+ A64: SQXTUN2 Vd.16B, Vn.8H
+
To be added.
To be added.
@@ -15545,7 +15585,11 @@
To be added.
To be added.
- uint16x8_t vqmovun_high_s32 (uint16x4_t r, int32x4_t a) A32: VQMOVUN.S32 Dd+1, Qm A64: SQXTUN2 Vd.8H, Vn.4S
+
+ uint16x8_t vqmovun_high_s32 (uint16x4_t r, int32x4_t a)
+ A32: VQMOVUN.S32 Dd+1, Qm
+ A64: SQXTUN2 Vd.8H, Vn.4S
+
To be added.
To be added.
@@ -15575,7 +15619,11 @@
To be added.
To be added.
- uint32x4_t vqmovun_high_s64 (uint32x2_t r, int64x2_t a) A32: VQMOVUN.S64 Dd+1, Qm A64: SQXTUN2 Vd.4S, Vn.2D
+
+ uint32x4_t vqmovun_high_s64 (uint32x2_t r, int64x2_t a)
+ A32: VQMOVUN.S64 Dd+1, Qm
+ A64: SQXTUN2 Vd.4S, Vn.2D
+
To be added.
To be added.
@@ -15605,7 +15653,11 @@
To be added.
To be added.
- uint8x16_t vqmovn_high_u16 (uint8x8_t r, uint16x8_t a) A32: VQMOVN.U16 Dd+1, Qm A64: UQXTN2 Vd.16B, Vn.8H
+
+ uint8x16_t vqmovn_high_u16 (uint8x8_t r, uint16x8_t a)
+ A32: VQMOVN.U16 Dd+1, Qm
+ A64: UQXTN2 Vd.16B, Vn.8H
+
To be added.
To be added.
@@ -15635,7 +15687,11 @@
To be added.
To be added.
- int16x8_t vqmovn_high_s32 (int16x4_t r, int32x4_t a) A32: VQMOVN.S32 Dd+1, Qm A64: SQXTN2 Vd.8H, Vn.4S
+
+ int16x8_t vqmovn_high_s32 (int16x4_t r, int32x4_t a)
+ A32: VQMOVN.S32 Dd+1, Qm
+ A64: SQXTN2 Vd.8H, Vn.4S
+
To be added.
To be added.
@@ -15665,7 +15721,11 @@
To be added.
To be added.
- int32x4_t vqmovn_high_s64 (int32x2_t r, int64x2_t a) A32: VQMOVN.S64 Dd+1, Qm A64: SQXTN2 Vd.4S, Vn.2D
+
+ int32x4_t vqmovn_high_s64 (int32x2_t r, int64x2_t a)
+ A32: VQMOVN.S64 Dd+1, Qm
+ A64: SQXTN2 Vd.4S, Vn.2D
+
To be added.
To be added.
@@ -15695,7 +15755,11 @@
To be added.
To be added.
- int8x16_t vqmovn_high_s16 (int8x8_t r, int16x8_t a) A32: VQMOVN.S16 Dd+1, Qm A64: SQXTN2 Vd.16B, Vn.8H
+
+ int8x16_t vqmovn_high_s16 (int8x8_t r, int16x8_t a)
+ A32: VQMOVN.S16 Dd+1, Qm
+ A64: SQXTN2 Vd.16B, Vn.8H
+
To be added.
To be added.
@@ -15725,7 +15789,11 @@
To be added.
To be added.
- uint16x8_t vqmovn_high_u32 (uint16x4_t r, uint32x4_t a) A32: VQMOVN.U32 Dd+1, Qm A64: UQXTN2 Vd.8H, Vn.4S
+
+ uint16x8_t vqmovn_high_u32 (uint16x4_t r, uint32x4_t a)
+ A32: VQMOVN.U32 Dd+1, Qm
+ A64: UQXTN2 Vd.8H, Vn.4S
+
To be added.
To be added.
@@ -15755,7 +15823,11 @@
To be added.
To be added.
- uint32x4_t vqmovn_high_u64 (uint32x2_t r, uint64x2_t a) A32: VQMOVN.U64 Dd+1, Qm A64: UQXTN2 Vd.4S, Vn.2D
+
+ uint32x4_t vqmovn_high_u64 (uint32x2_t r, uint64x2_t a)
+ A32: VQMOVN.U64 Dd+1, Qm
+ A64: UQXTN2 Vd.4S, Vn.2D
+
To be added.
To be added.
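
(The vqmovn/vqmovun hunks above re-wrap the vector narrowing signatures; a minimal sketch, assuming .NET 5+ and that vqmovn_s16 surfaces as AdvSimd.ExtractNarrowingSaturateLower — an assumption, since the C# member names fall outside this extract.)

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

class SqxtnVectorSketch
{
    static void Main()
    {
        if (!AdvSimd.IsSupported) return;
        Vector128<short> wide = Vector128.Create((short)5, 1000, -7, -1000, 127, 128, -128, -129);
        // SQXTN Vd.8B, Vn.8H: narrow each Int16 lane to SByte with saturation.
        Vector64<sbyte> lo = AdvSimd.ExtractNarrowingSaturateLower(wide);
        Console.WriteLine(lo); // <5, 127, -7, -128, 127, 127, -128, -128>
    }
}
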
@@ -19244,7 +19316,11 @@
To be added.
To be added.
To be added.
- float64x2_t vcopyq_lane_f64 (float64x2_t a, const int lane1, float64x1_t b, const int lane2) A32: VMOV.F64 Dd, Dm A64: INS Vd.D[lane1], Vn.D[0]
+
+ float64x2_t vcopyq_lane_f64 (float64x2_t a, const int lane1, float64x1_t b, const int lane2)
+ A32: VMOV.F64 Dd, Dm
+ A64: INS Vd.D[lane1], Vn.D[0]
+
To be added.
To be added.
@@ -19283,7 +19359,11 @@
To be added.
To be added.
To be added.
- int64x2_t vcopyq_lane_s64 (int64x2_t a, const int lane1, int64x1_t b, const int lane2) A32: VMOV Dd, Dm A64: INS Vd.D[lane1], Vn.D[0]
+
+ int64x2_t vcopyq_lane_s64 (int64x2_t a, const int lane1, int64x1_t b, const int lane2)
+ A32: VMOV Dd, Dm
+ A64: INS Vd.D[lane1], Vn.D[0]
+
To be added.
To be added.
@@ -19322,7 +19402,11 @@
To be added.
To be added.
To be added.
- uint64x2_t vcopyq_lane_u64 (uint64x2_t a, const int lane1, uint64x1_t b, const int lane2) A32: VMOV Dd, Dm A64: INS Vd.D[lane1], Vn.D[0]
+
+ uint64x2_t vcopyq_lane_u64 (uint64x2_t a, const int lane1, uint64x1_t b, const int lane2)
+ A32: VMOV Dd, Dm
+ A64: INS Vd.D[lane1], Vn.D[0]
+
To be added.
To be added.
@@ -28664,7 +28748,11 @@
To be added.
To be added.
- int16x8_t vqdmulhq_n_s16 (int16x8_t a, int16_t b) A32: VQDMULH.S16 Qd, Qn, Dm[0] A64: SQDMULH Vd.8H, Vn.8H, Vm.H[0]
+
+ int16x8_t vqdmulhq_n_s16 (int16x8_t a, int16_t b)
+ A32: VQDMULH.S16 Qd, Qn, Dm[0]
+ A64: SQDMULH Vd.8H, Vn.8H, Vm.H[0]
+
To be added.
To be added.
@@ -28694,7 +28782,11 @@
To be added.
To be added.
- int32x4_t vqdmulhq_n_s32 (int32x4_t a, int32_t b) A32: VQDMULH.S32 Qd, Qn, Dm[0] A64: SQDMULH Vd.4S, Vn.4S, Vm.S[0]
+
+ int32x4_t vqdmulhq_n_s32 (int32x4_t a, int32_t b)
+ A32: VQDMULH.S32 Qd, Qn, Dm[0]
+ A64: SQDMULH Vd.4S, Vn.4S, Vm.S[0]
+
To be added.
To be added.
@@ -28724,7 +28816,11 @@
To be added.
To be added.
- int16x4_t vqdmulh_n_s16 (int16x4_t a, int16_t b) A32: VQDMULH.S16 Dd, Dn, Dm[0] A64: SQDMULH Vd.4H, Vn.4H, Vm.H[0]
+
+ int16x4_t vqdmulh_n_s16 (int16x4_t a, int16_t b)
+ A32: VQDMULH.S16 Dd, Dn, Dm[0]
+ A64: SQDMULH Vd.4H, Vn.4H, Vm.H[0]
+
To be added.
To be added.
@@ -28754,7 +28850,11 @@
To be added.
To be added.
- int32x2_t vqdmulh_n_s32 (int32x2_t a, int32_t b) A32: VQDMULH.S32 Dd, Dn, Dm[0] A64: SQDMULH Vd.2S, Vn.2S, Vm.S[0]
+
+ int32x2_t vqdmulh_n_s32 (int32x2_t a, int32_t b)
+ A32: VQDMULH.S32 Dd, Dn, Dm[0]
+ A64: SQDMULH Vd.2S, Vn.2S, Vm.S[0]
+
To be added.
To be added.
@@ -28793,7 +28893,11 @@
To be added.
To be added.
To be added.
- int16x8_t vqdmulhq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane) A32: VQDMULH.S16 Qd, Qn, Dm[lane] A64: SQDMULH Vd.8H, Vn.8H, Vm.H[lane]
+
+ int16x8_t vqdmulhq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
+ A32: VQDMULH.S16 Qd, Qn, Dm[lane]
+ A64: SQDMULH Vd.8H, Vn.8H, Vm.H[lane]
+
To be added.
To be added.
@@ -28832,7 +28936,11 @@
To be added.
To be added.
To be added.
- int16x8_t vqdmulhq_lane_s16 (int16x8_t a, int16x4_t v, const int lane) A32: VQDMULH.S16 Qd, Qn, Dm[lane] A64: SQDMULH Vd.8H, Vn.8H, Vm.H[lane]
+
+ int16x8_t vqdmulhq_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
+ A32: VQDMULH.S16 Qd, Qn, Dm[lane]
+ A64: SQDMULH Vd.8H, Vn.8H, Vm.H[lane]
+
To be added.
To be added.
@@ -28871,7 +28979,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmulhq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane) A32: VQDMULH.S32 Qd, Qn, Dm[lane] A64: SQDMULH Vd.4S, Vn.4S, Vm.S[lane]
+
+ int32x4_t vqdmulhq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
+ A32: VQDMULH.S32 Qd, Qn, Dm[lane]
+ A64: SQDMULH Vd.4S, Vn.4S, Vm.S[lane]
+
To be added.
To be added.
@@ -28910,7 +29022,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmulhq_lane_s32 (int32x4_t a, int32x2_t v, const int lane) A32: VQDMULH.S32 Qd, Qn, Dm[lane] A64: SQDMULH Vd.4S, Vn.4S, Vm.S[lane]
+
+ int32x4_t vqdmulhq_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
+ A32: VQDMULH.S32 Qd, Qn, Dm[lane]
+ A64: SQDMULH Vd.4S, Vn.4S, Vm.S[lane]
+
To be added.
To be added.
@@ -28949,7 +29065,11 @@
To be added.
To be added.
To be added.
- int16x4_t vqdmulh_laneq_s16 (int16x4_t a, int16x8_t v, const int lane) A32: VQDMULH.S16 Dd, Dn, Dm[lane] A64: SQDMULH Vd.4H, Vn.4H, Vm.H[lane]
+
+ int16x4_t vqdmulh_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
+ A32: VQDMULH.S16 Dd, Dn, Dm[lane]
+ A64: SQDMULH Vd.4H, Vn.4H, Vm.H[lane]
+
To be added.
To be added.
@@ -28988,7 +29108,11 @@
To be added.
To be added.
To be added.
- int16x4_t vqdmulh_lane_s16 (int16x4_t a, int16x4_t v, const int lane) A32: VQDMULH.S16 Dd, Dn, Dm[lane] A64: SQDMULH Vd.4H, Vn.4H, Vm.H[lane]
+
+ int16x4_t vqdmulh_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
+ A32: VQDMULH.S16 Dd, Dn, Dm[lane]
+ A64: SQDMULH Vd.4H, Vn.4H, Vm.H[lane]
+
To be added.
To be added.
@@ -29027,7 +29151,11 @@
To be added.
To be added.
To be added.
- int32x2_t vqdmulh_laneq_s32 (int32x2_t a, int32x4_t v, const int lane) A32: VQDMULH.S32 Dd, Dn, Dm[lane] A64: SQDMULH Vd.2S, Vn.2S, Vm.S[lane]
+
+ int32x2_t vqdmulh_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
+ A32: VQDMULH.S32 Dd, Dn, Dm[lane]
+ A64: SQDMULH Vd.2S, Vn.2S, Vm.S[lane]
+
To be added.
To be added.
@@ -29066,7 +29194,11 @@
To be added.
To be added.
To be added.
- int32x2_t vqdmulh_lane_s32 (int32x2_t a, int32x2_t v, const int lane) A32: VQDMULH.S32 Dd, Dn, Dm[lane] A64: SQDMULH Vd.2S, Vn.2S, Vm.S[lane]
+
+ int32x2_t vqdmulh_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
+ A32: VQDMULH.S32 Dd, Dn, Dm[lane]
+ A64: SQDMULH Vd.2S, Vn.2S, Vm.S[lane]
+
To be added.
To be added.
@@ -29096,7 +29228,11 @@
To be added.
To be added.
- int16x8_t vqdmulhq_s16 (int16x8_t a, int16x8_t b) A32: VQDMULH.S16 Qd, Qn, Qm A64: SQDMULH Vd.8H, Vn.8H, Vm.8H
+
+ int16x8_t vqdmulhq_s16 (int16x8_t a, int16x8_t b)
+ A32: VQDMULH.S16 Qd, Qn, Qm
+ A64: SQDMULH Vd.8H, Vn.8H, Vm.8H
+
To be added.
To be added.
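
(For the vector SQDMULH hunks above: a minimal sketch, assuming .NET 5+ and the name AdvSimd.MultiplyDoublingSaturateHigh for vqdmulhq_s16 — an assumption, as above.)

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

class SqdmulhVectorSketch
{
    static void Main()
    {
        if (!AdvSimd.IsSupported) return;
        // VQDMULH.S16 / SQDMULH: per lane, (2 * a * b) >> 16 with saturation.
        Vector128<short> a = Vector128.Create((short)30000, -30000, 16384, 1, 0, 0, 0, 0);
        Vector128<short> b = Vector128.Create((short)30000, 30000, 16384, 1, 0, 0, 0, 0);
        Vector128<short> r = AdvSimd.MultiplyDoublingSaturateHigh(a, b);
        Console.WriteLine(r); // <27465, -27466, 8192, 0, 0, 0, 0, 0>
    }
}
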
@@ -29126,7 +29262,11 @@
To be added.
To be added.
- int32x4_t vqdmulhq_s32 (int32x4_t a, int32x4_t b) A32: VQDMULH.S32 Qd, Qn, Qm A64: SQDMULH Vd.4S, Vn.4S, Vm.4S
+
+ int32x4_t vqdmulhq_s32 (int32x4_t a, int32x4_t b)
+ A32: VQDMULH.S32 Qd, Qn, Qm
+ A64: SQDMULH Vd.4S, Vn.4S, Vm.4S
+
To be added.
To be added.
@@ -29156,7 +29296,11 @@
To be added.
To be added.
- int16x4_t vqdmulh_s16 (int16x4_t a, int16x4_t b) A32: VQDMULH.S16 Dd, Dn, Dm A64: SQDMULH Vd.4H, Vn.4H, Vm.4H
+
+ int16x4_t vqdmulh_s16 (int16x4_t a, int16x4_t b)
+ A32: VQDMULH.S16 Dd, Dn, Dm
+ A64: SQDMULH Vd.4H, Vn.4H, Vm.4H
+
To be added.
To be added.
@@ -29186,7 +29330,11 @@
To be added.
To be added.
- int32x2_t vqdmulh_s32 (int32x2_t a, int32x2_t b) A32: VQDMULH.S32 Dd, Dn, Dm A64: SQDMULH Vd.2S, Vn.2S, Vm.2S
+
+ int32x2_t vqdmulh_s32 (int32x2_t a, int32x2_t b)
+ A32: VQDMULH.S32 Dd, Dn, Dm
+ A64: SQDMULH Vd.2S, Vn.2S, Vm.2S
+
To be added.
To be added.
@@ -29218,7 +29366,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c) A32: VQDMLAL.S16 Qd, Dn, Dm A64: SQDMLAL Vd.4S, Vn.4H, Vm.4H
+
+ int32x4_t vqdmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+ A32: VQDMLAL.S16 Qd, Dn, Dm
+ A64: SQDMLAL Vd.4S, Vn.4H, Vm.4H
+
To be added.
To be added.
@@ -29250,7 +29402,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c) A32: VQDMLAL.S32 Qd, Dn, Dm A64: SQDMLAL Vd.2D, Vn.2S, Vm.2S
+
+ int64x2_t vqdmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+ A32: VQDMLAL.S32 Qd, Dn, Dm
+ A64: SQDMLAL Vd.2D, Vn.2S, Vm.2S
+
To be added.
To be added.
@@ -29282,7 +29438,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c) A32: VQDMLSL.S16 Qd, Dn, Dm A64: SQDMLSL Vd.4S, Vn.4H, Vm.4H
+
+ int32x4_t vqdmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+ A32: VQDMLSL.S16 Qd, Dn, Dm
+ A64: SQDMLSL Vd.4S, Vn.4H, Vm.4H
+
To be added.
To be added.
@@ -29314,7 +29474,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c) A32: VQDMLSL.S32 Qd, Dn, Dm A64: SQDMLSL Vd.2D, Vn.2S, Vm.2S
+
+ int64x2_t vqdmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+ A32: VQDMLSL.S32 Qd, Dn, Dm
+ A64: SQDMLSL Vd.2D, Vn.2S, Vm.2S
+
To be added.
To be added.
@@ -29346,7 +29510,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c) A32: VQDMLAL.S16 Qd, Dn, Dm[0] A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[0]
+
+ int32x4_t vqdmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
+ A32: VQDMLAL.S16 Qd, Dn, Dm[0]
+ A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[0]
+
To be added.
To be added.
@@ -29378,7 +29546,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c) A32: VQDMLAL.S32 Qd, Dn, Dm[0] A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[0]
+
+ int64x2_t vqdmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
+ A32: VQDMLAL.S32 Qd, Dn, Dm[0]
+ A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[0]
+
To be added.
To be added.
@@ -29410,7 +29582,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c) A32: VQDMLSL.S16 Qd, Dn, Dm[0] A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[0]
+
+ int32x4_t vqdmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
+ A32: VQDMLSL.S16 Qd, Dn, Dm[0]
+ A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[0]
+
To be added.
To be added.
@@ -29442,7 +29618,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c) A32: VQDMLSL.S32 Qd, Dn, Dm[0] A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[0]
+
+ int64x2_t vqdmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
+ A32: VQDMLSL.S32 Qd, Dn, Dm[0]
+ A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[0]
+
To be added.
To be added.
@@ -29483,7 +29663,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlal_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane) A32: VQDMLAL.S16 Qd, Dn, Dm[lane] A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[lane]
+
+ int32x4_t vqdmlal_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane)
+ A32: VQDMLAL.S16 Qd, Dn, Dm[lane]
+ A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[lane]
+
To be added.
To be added.
@@ -29524,7 +29708,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane) A32: VQDMLAL.S16 Qd, Dn, Dm[lane] A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[lane]
+
+ int32x4_t vqdmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane)
+ A32: VQDMLAL.S16 Qd, Dn, Dm[lane]
+ A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[lane]
+
To be added.
To be added.
@@ -29565,7 +29753,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlal_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane) A32: VQDMLAL.S32 Qd, Dn, Dm[lane] A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[lane]
+
+ int64x2_t vqdmlal_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane)
+ A32: VQDMLAL.S32 Qd, Dn, Dm[lane]
+ A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[lane]
+
To be added.
To be added.
@@ -29606,7 +29798,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlal_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane) A32: VQDMLAL.S32 Qd, Dn, Dm[lane] A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[lane]
+
+ int64x2_t vqdmlal_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane)
+ A32: VQDMLAL.S32 Qd, Dn, Dm[lane]
+ A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[lane]
+
To be added.
To be added.
@@ -29647,7 +29843,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlsl_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane) A32: VQDMLSL.S16 Qd, Dn, Dm[lane] A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[lane]
+
+ int32x4_t vqdmlsl_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane)
+ A32: VQDMLSL.S16 Qd, Dn, Dm[lane]
+ A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[lane]
+
To be added.
To be added.
@@ -29688,7 +29888,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane) A32: VQDMLSL.S16 Qd, Dn, Dm[lane] A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[lane]
+
+ int32x4_t vqdmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane)
+ A32: VQDMLSL.S16 Qd, Dn, Dm[lane]
+ A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[lane]
+
To be added.
To be added.
@@ -29729,7 +29933,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlsl_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane) A32: VQDMLSL.S32 Qd, Dn, Dm[lane] A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[lane]
+
+ int64x2_t vqdmlsl_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane)
+ A32: VQDMLSL.S32 Qd, Dn, Dm[lane]
+ A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[lane]
+
To be added.
To be added.
@@ -29770,7 +29978,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlsl_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane) A32: VQDMLSL.S32 Qd, Dn, Dm[lane] A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[lane]
+
+ int64x2_t vqdmlsl_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane)
+ A32: VQDMLSL.S32 Qd, Dn, Dm[lane]
+ A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[lane]
+
To be added.
To be added.
@@ -29800,7 +30012,11 @@
To be added.
To be added.
- int32x4_t vqdmull_s16 (int16x4_t a, int16x4_t b) A32: VQDMULL.S16 Qd, Dn, Dm A64: SQDMULL Vd.4S, Vn.4H, Vm.4H
+
+ int32x4_t vqdmull_s16 (int16x4_t a, int16x4_t b)
+ A32: VQDMULL.S16 Qd, Dn, Dm
+ A64: SQDMULL Vd.4S, Vn.4H, Vm.4H
+
To be added.
To be added.
@@ -29830,7 +30046,11 @@
To be added.
To be added.
- int64x2_t vqdmull_s32 (int32x2_t a, int32x2_t b) A32: VQDMULL.S32 Qd, Dn, Dm A64: SQDMULL Vd.2D, Vn.2S, Vm.2S
+
+ int64x2_t vqdmull_s32 (int32x2_t a, int32x2_t b)
+ A32: VQDMULL.S32 Qd, Dn, Dm
+ A64: SQDMULL Vd.2D, Vn.2S, Vm.2S
+
To be added.
To be added.
@@ -29860,7 +30080,11 @@
To be added.
To be added.
- int32x4_t vqdmull_n_s16 (int16x4_t a, int16_t b) A32: VQDMULL.S16 Qd, Dn, Dm[0] A64: SQDMULL Vd.4S, Vn.4H, Vm.H[0]
+
+ int32x4_t vqdmull_n_s16 (int16x4_t a, int16_t b)
+ A32: VQDMULL.S16 Qd, Dn, Dm[0]
+ A64: SQDMULL Vd.4S, Vn.4H, Vm.H[0]
+
To be added.
To be added.
@@ -29890,7 +30114,11 @@
To be added.
To be added.
- int64x2_t vqdmull_n_s32 (int32x2_t a, int32_t b) A32: VQDMULL.S32 Qd, Dn, Dm[0] A64: SQDMULL Vd.2D, Vn.2S, Vm.S[0]
+
+ int64x2_t vqdmull_n_s32 (int32x2_t a, int32_t b)
+ A32: VQDMULL.S32 Qd, Dn, Dm[0]
+ A64: SQDMULL Vd.2D, Vn.2S, Vm.S[0]
+
To be added.
To be added.
@@ -29929,7 +30157,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmull_laneq_s16 (int16x4_t a, int16x8_t v, const int lane) A32: VQDMULL.S16 Qd, Dn, Dm[lane] A64: SQDMULL Vd.4S, Vn.4H, Vm.H[lane]
+
+ int32x4_t vqdmull_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
+ A32: VQDMULL.S16 Qd, Dn, Dm[lane]
+ A64: SQDMULL Vd.4S, Vn.4H, Vm.H[lane]
+
To be added.
To be added.
@@ -29968,7 +30200,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmull_lane_s16 (int16x4_t a, int16x4_t v, const int lane) A32: VQDMULL.S16 Qd, Dn, Dm[lane] A64: SQDMULL Vd.4S, Vn.4H, Vm.H[lane]
+
+ int32x4_t vqdmull_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
+ A32: VQDMULL.S16 Qd, Dn, Dm[lane]
+ A64: SQDMULL Vd.4S, Vn.4H, Vm.H[lane]
+
To be added.
To be added.
@@ -30007,7 +30243,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmull_laneq_s32 (int32x2_t a, int32x4_t v, const int lane) A32: VQDMULL.S32 Qd, Dn, Dm[lane] A64: SQDMULL Vd.2D, Vn.2S, Vm.S[lane]
+
+ int64x2_t vqdmull_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
+ A32: VQDMULL.S32 Qd, Dn, Dm[lane]
+ A64: SQDMULL Vd.2D, Vn.2S, Vm.S[lane]
+
To be added.
To be added.
@@ -30046,7 +30286,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmull_lane_s32 (int32x2_t a, int32x2_t v, const int lane) A32: VQDMULL.S32 Qd, Dn, Dm[lane] A64: SQDMULL Vd.2D, Vn.2S, Vm.S[lane]
+
+ int64x2_t vqdmull_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
+ A32: VQDMULL.S32 Qd, Dn, Dm[lane]
+ A64: SQDMULL Vd.2D, Vn.2S, Vm.S[lane]
+
To be added.
To be added.
@@ -30076,7 +30320,11 @@
To be added.
To be added.
- int32x4_t vqdmull_high_s16 (int16x8_t a, int16x8_t b) A32: VQDMULL.S16 Qd, Dn+1, Dm+1 A64: SQDMULL2 Vd.4S, Vn.8H, Vm.8H
+
+ int32x4_t vqdmull_high_s16 (int16x8_t a, int16x8_t b)
+ A32: VQDMULL.S16 Qd, Dn+1, Dm+1
+ A64: SQDMULL2 Vd.4S, Vn.8H, Vm.8H
+
To be added.
To be added.
@@ -30106,7 +30354,11 @@
To be added.
To be added.
- int64x2_t vqdmull_high_s32 (int32x4_t a, int32x4_t b) A32: VQDMULL.S32 Qd, Dn+1, Dm+1 A64: SQDMULL2 Vd.2D, Vn.4S, Vm.4S
+
+ int64x2_t vqdmull_high_s32 (int32x4_t a, int32x4_t b)
+ A32: VQDMULL.S32 Qd, Dn+1, Dm+1
+ A64: SQDMULL2 Vd.2D, Vn.4S, Vm.4S
+
To be added.
To be added.
@@ -30136,7 +30388,11 @@
To be added.
To be added.
- int32x4_t vqdmull_high_n_s16 (int16x8_t a, int16_t b) A32: VQDMULL.S16 Qd, Dn+1, Dm[0] A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[0]
+
+ int32x4_t vqdmull_high_n_s16 (int16x8_t a, int16_t b)
+ A32: VQDMULL.S16 Qd, Dn+1, Dm[0]
+ A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[0]
+
To be added.
To be added.
@@ -30166,7 +30422,11 @@
To be added.
To be added.
- int64x2_t vqdmull_high_n_s32 (int32x4_t a, int32_t b) A32: VQDMULL.S32 Qd, Dn+1, Dm[0] A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[0]
+
+ int64x2_t vqdmull_high_n_s32 (int32x4_t a, int32_t b)
+ A32: VQDMULL.S32 Qd, Dn+1, Dm[0]
+ A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[0]
+
To be added.
To be added.
@@ -30205,7 +30465,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmull_high_laneq_s16 (int16x8_t a, int16x8_t v, const int lane) A32: VQDMULL.S16 Qd, Dn+1, Dm[lane] A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[lane]
+
+ int32x4_t vqdmull_high_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
+ A32: VQDMULL.S16 Qd, Dn+1, Dm[lane]
+ A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[lane]
+
To be added.
To be added.
@@ -30244,7 +30508,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmull_high_lane_s16 (int16x8_t a, int16x4_t v, const int lane) A32: VQDMULL.S16 Qd, Dn+1, Dm[lane] A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[lane]
+
+ int32x4_t vqdmull_high_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
+ A32: VQDMULL.S16 Qd, Dn+1, Dm[lane]
+ A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[lane]
+
To be added.
To be added.
@@ -30283,7 +30551,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmull_high_laneq_s32 (int32x4_t a, int32x4_t v, const int lane) A32: VQDMULL.S32 Qd, Dn+1, Dm[lane] A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[lane]
+
+ int64x2_t vqdmull_high_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
+ A32: VQDMULL.S32 Qd, Dn+1, Dm[lane]
+ A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[lane]
+
To be added.
To be added.
@@ -30322,7 +30594,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmull_high_lane_s32 (int32x4_t a, int32x2_t v, const int lane) A32: VQDMULL.S32 Qd, Dn+1, Dm[lane] A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[lane]
+
+ int64x2_t vqdmull_high_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
+ A32: VQDMULL.S32 Qd, Dn+1, Dm[lane]
+ A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[lane]
+
To be added.
To be added.
@@ -30354,7 +30630,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) A32: VQDMLAL.S16 Qd, Dn+1, Dm+1 A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.8H
+
+ int32x4_t vqdmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+ A32: VQDMLAL.S16 Qd, Dn+1, Dm+1
+ A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.8H
+
To be added.
To be added.
@@ -30386,7 +30666,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) A32: VQDMLAL.S32 Qd, Dn+1, Dm+1 A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.4S
+
+ int64x2_t vqdmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+ A32: VQDMLAL.S32 Qd, Dn+1, Dm+1
+ A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.4S
+
To be added.
To be added.
@@ -30418,7 +30702,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) A32: VQDMLSL.S16 Qd, Dn+1, Dm+1 A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.8H
+
+ int32x4_t vqdmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+ A32: VQDMLSL.S16 Qd, Dn+1, Dm+1
+ A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.8H
+
To be added.
To be added.
@@ -30450,7 +30738,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) A32: VQDMLSL.S32 Qd, Dn+1, Dm+1 A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.4S
+
+ int64x2_t vqdmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+ A32: VQDMLSL.S32 Qd, Dn+1, Dm+1
+ A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.4S
+
To be added.
To be added.
@@ -30482,7 +30774,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c) A32: VQDMLAL.S16 Qd, Dn+1, Dm[0] A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[0]
+
+ int32x4_t vqdmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
+ A32: VQDMLAL.S16 Qd, Dn+1, Dm[0]
+ A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[0]
+
To be added.
To be added.
@@ -30514,7 +30810,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlal_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c) A32: VQDMLAL.S32 Qd, Dn+1, Dm[0] A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[0]
+
+ int64x2_t vqdmlal_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
+ A32: VQDMLAL.S32 Qd, Dn+1, Dm[0]
+ A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[0]
+
To be added.
To be added.
@@ -30546,7 +30846,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c) A32: VQDMLSL.S16 Qd, Dn+1, Dm[0] A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[0]
+
+ int32x4_t vqdmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
+ A32: VQDMLSL.S16 Qd, Dn+1, Dm[0]
+ A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[0]
+
To be added.
To be added.
@@ -30578,7 +30882,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c) A32: VQDMLSL.S32 Qd, Dn+1, Dm[0] A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[0]
+
+ int64x2_t vqdmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
+ A32: VQDMLSL.S32 Qd, Dn+1, Dm[0]
+ A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[0]
+
To be added.
To be added.
@@ -30619,7 +30927,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlal_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane) A32: VQDMLAL.S16 Qd, Dn+1, Dm[lane] A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[lane]
+
+ int32x4_t vqdmlal_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane)
+ A32: VQDMLAL.S16 Qd, Dn+1, Dm[lane]
+ A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[lane]
+
To be added.
To be added.
@@ -30660,7 +30972,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlal_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane) A32: VQDMLAL.S16 Qd, Dn+1, Dm[lane] A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[lane]
+
+ int32x4_t vqdmlal_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane)
+ A32: VQDMLAL.S16 Qd, Dn+1, Dm[lane]
+ A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[lane]
+
To be added.
To be added.
@@ -30701,7 +31017,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlal_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane) A32: VQDMLAL.S32 Qd, Dn+1, Dm[lane] A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[lane]
+
+ int64x2_t vqdmlal_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane)
+ A32: VQDMLAL.S32 Qd, Dn+1, Dm[lane]
+ A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[lane]
+
To be added.
To be added.
@@ -30742,7 +31062,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlal_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane) A32: VQDMLAL.S32 Qd, Dn+1, Dm[lane] A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[lane]
+
+ int64x2_t vqdmlal_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane)
+ A32: VQDMLAL.S32 Qd, Dn+1, Dm[lane]
+ A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[lane]
+
To be added.
To be added.
@@ -30783,7 +31107,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlsl_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane) A32: VQDMLSL.S16 Qd, Dn+1, Dm[lane] A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[lane]
+
+ int32x4_t vqdmlsl_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane)
+ A32: VQDMLSL.S16 Qd, Dn+1, Dm[lane]
+ A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[lane]
+
To be added.
To be added.
@@ -30824,7 +31152,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqdmlsl_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane) A32: VQDMLSL.S16 Qd, Dn+1, Dm[lane] A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[lane]
+
+ int32x4_t vqdmlsl_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane)
+ A32: VQDMLSL.S16 Qd, Dn+1, Dm[lane]
+ A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[lane]
+
To be added.
To be added.
@@ -30865,7 +31197,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlsl_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane) A32: VQDMLSL.S32 Qd, Dn+1, Dm[lane] A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
+
+ int64x2_t vqdmlsl_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane)
+ A32: VQDMLSL.S32 Qd, Dn+1, Dm[lane]
+ A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
+
To be added.
To be added.
@@ -30906,7 +31242,11 @@
To be added.
To be added.
To be added.
- int64x2_t vqdmlsl_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane) A32: VQDMLSL.S32 Qd, Dn+1, Dm[lane] A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
+
+ int64x2_t vqdmlsl_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane)
+ A32: VQDMLSL.S32 Qd, Dn+1, Dm[lane]
+ A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
+
To be added.
To be added.
@@ -30936,7 +31276,11 @@
To be added.
To be added.
- int16x8_t vqrdmulhq_n_s16 (int16x8_t a, int16_t b) A32: VQRDMULH.S16 Qd, Qn, Dm[0] A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[0]
+
+ int16x8_t vqrdmulhq_n_s16 (int16x8_t a, int16_t b)
+ A32: VQRDMULH.S16 Qd, Qn, Dm[0]
+ A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[0]
+
To be added.
To be added.
@@ -30966,7 +31310,11 @@
To be added.
To be added.
- int32x4_t vqrdmulhq_n_s32 (int32x4_t a, int32_t b) A32: VQRDMULH.S32 Qd, Qn, Dm[0] A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[0]
+
+ int32x4_t vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
+ A32: VQRDMULH.S32 Qd, Qn, Dm[0]
+ A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[0]
+
To be added.
To be added.
@@ -30996,7 +31344,11 @@
To be added.
To be added.
- int16x4_t vqrdmulh_n_s16 (int16x4_t a, int16_t b) A32: VQRDMULH.S16 Dd, Dn, Dm[0] A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[0]
+
+ int16x4_t vqrdmulh_n_s16 (int16x4_t a, int16_t b)
+ A32: VQRDMULH.S16 Dd, Dn, Dm[0]
+ A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[0]
+
To be added.
To be added.
@@ -31026,7 +31378,11 @@
To be added.
To be added.
- int32x2_t vqrdmulh_n_s32 (int32x2_t a, int32_t b) A32: VQRDMULH.S32 Dd, Dn, Dm[0] A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[0]
+
+ int32x2_t vqrdmulh_n_s32 (int32x2_t a, int32_t b)
+ A32: VQRDMULH.S32 Dd, Dn, Dm[0]
+ A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[0]
+
To be added.
To be added.
@@ -31065,7 +31421,11 @@
To be added.
To be added.
To be added.
- int16x8_t vqrdmulhq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane) A32: VQRDMULH.S16 Qd, Qn, Dm[lane] A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[lane]
+
+ int16x8_t vqrdmulhq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
+ A32: VQRDMULH.S16 Qd, Qn, Dm[lane]
+ A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[lane]
+
To be added.
To be added.
@@ -31104,7 +31464,11 @@
To be added.
To be added.
To be added.
- int16x8_t vqrdmulhq_lane_s16 (int16x8_t a, int16x4_t v, const int lane) A32: VQRDMULH.S16 Qd, Qn, Dm[lane] A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[lane]
+
+ int16x8_t vqrdmulhq_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
+ A32: VQRDMULH.S16 Qd, Qn, Dm[lane]
+ A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[lane]
+
To be added.
To be added.
@@ -31143,7 +31507,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqrdmulhq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane) A32: VQRDMULH.S32 Qd, Qn, Dm[lane] A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[lane]
+
+ int32x4_t vqrdmulhq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
+ A32: VQRDMULH.S32 Qd, Qn, Dm[lane]
+ A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[lane]
+
To be added.
To be added.
@@ -31182,7 +31550,11 @@
To be added.
To be added.
To be added.
- int32x4_t vqrdmulhq_lane_s32 (int32x4_t a, int32x2_t v, const int lane) A32: VQRDMULH.S32 Qd, Qn, Dm[lane] A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[lane]
+
+ int32x4_t vqrdmulhq_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
+ A32: VQRDMULH.S32 Qd, Qn, Dm[lane]
+ A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[lane]
+
To be added.
To be added.
@@ -31221,7 +31593,11 @@
To be added.
To be added.
To be added.
- int16x4_t vqrdmulh_laneq_s16 (int16x4_t a, int16x8_t v, const int lane) A32: VQRDMULH.S16 Dd, Dn, Dm[lane] A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[lane]
+
+ int16x4_t vqrdmulh_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
+ A32: VQRDMULH.S16 Dd, Dn, Dm[lane]
+ A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[lane]
+
To be added.
To be added.
@@ -31260,7 +31636,11 @@
To be added.
To be added.
To be added.
- int16x4_t vqrdmulh_lane_s16 (int16x4_t a, int16x4_t v, const int lane) A32: VQRDMULH.S16 Dd, Dn, Dm[lane] A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[lane]
+
+ int16x4_t vqrdmulh_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
+ A32: VQRDMULH.S16 Dd, Dn, Dm[lane]
+ A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[lane]
+
To be added.
To be added.
@@ -31299,7 +31679,11 @@
To be added.
To be added.
To be added.
- int32x2_t vqrdmulh_laneq_s32 (int32x2_t a, int32x4_t v, const int lane) A32: VQRDMULH.S32 Dd, Dn, Dm[lane] A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[lane]
+
+ int32x2_t vqrdmulh_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
+ A32: VQRDMULH.S32 Dd, Dn, Dm[lane]
+ A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[lane]
+
To be added.
To be added.
@@ -31338,7 +31722,11 @@
To be added.
To be added.
To be added.
- int32x2_t vqrdmulh_lane_s32 (int32x2_t a, int32x2_t v, const int lane) A32: VQRDMULH.S32 Dd, Dn, Dm[lane] A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[lane]
+
+ int32x2_t vqrdmulh_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
+ A32: VQRDMULH.S32 Dd, Dn, Dm[lane]
+ A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[lane]
+
To be added.
To be added.
@@ -31368,7 +31756,11 @@
To be added.
To be added.
- int16x8_t vqrdmulhq_s16 (int16x8_t a, int16x8_t b) A32: VQRDMULH.S16 Qd, Qn, Qm A64: SQRDMULH Vd.8H, Vn.8H, Vm.8H
+
+ int16x8_t vqrdmulhq_s16 (int16x8_t a, int16x8_t b)
+ A32: VQRDMULH.S16 Qd, Qn, Qm
+ A64: SQRDMULH Vd.8H, Vn.8H, Vm.8H
+
To be added.
To be added.
@@ -31398,7 +31790,11 @@
To be added.
To be added.
- int32x4_t vqrdmulhq_s32 (int32x4_t a, int32x4_t b) A32: VQRDMULH.S32 Qd, Qn, Qm A64: SQRDMULH Vd.4S, Vn.4S, Vm.4S
+
+ int32x4_t vqrdmulhq_s32 (int32x4_t a, int32x4_t b)
+ A32: VQRDMULH.S32 Qd, Qn, Qm
+ A64: SQRDMULH Vd.4S, Vn.4S, Vm.4S
+
To be added.
To be added.
@@ -31428,7 +31824,11 @@
To be added.
To be added.
- int16x4_t vqrdmulh_s16 (int16x4_t a, int16x4_t b) A32: VQRDMULH.S16 Dd, Dn, Dm A64: SQRDMULH Vd.4H, Vn.4H, Vm.4H
+
+ int16x4_t vqrdmulh_s16 (int16x4_t a, int16x4_t b)
+ A32: VQRDMULH.S16 Dd, Dn, Dm
+ A64: SQRDMULH Vd.4H, Vn.4H, Vm.4H
+
To be added.
To be added.
@@ -31458,7 +31858,11 @@
To be added.
To be added.
- int32x2_t vqrdmulh_s32 (int32x2_t a, int32x2_t b) A32: VQRDMULH.S32 Dd, Dn, Dm A64: SQRDMULH Vd.2S, Vn.2S, Vm.2S
+
+ int32x2_t vqrdmulh_s32 (int32x2_t a, int32x2_t b)
+ A32: VQRDMULH.S32 Dd, Dn, Dm
+ A64: SQRDMULH Vd.2S, Vn.2S, Vm.2S
+
To be added.
To be added.
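
These SQRDMULH entries are the Q15 fixed-point multiply workhorse; a small sketch of the rounding and saturating behaviour (assumes a NEON-capable toolchain with <arm_neon.h>):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        // Q15 fixed point: 0.5 * 0.5 = 0.25, computed as (2*a*b + 2^15) >> 16
        // with saturation on overflow.
        int16x8_t a = vdupq_n_s16(16384);      // 0.5 in Q15
        int16x8_t b = vdupq_n_s16(16384);      // 0.5 in Q15
        int16x8_t r = vqrdmulhq_s16(a, b);
        printf("%d\n", vgetq_lane_s16(r, 0));  // prints 8192 (0.25 in Q15)
        return 0;
    }
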
@@ -37656,7 +38060,11 @@
To be added.
- int16x8_t vrev32q_s16 (int16x8_t vec) A32: VREV32.16 Qd, Qm A64: REV32 Vd.8H, Vn.8H
+
+ int16x8_t vrev32q_s16 (int16x8_t vec)
+ A32: VREV32.16 Qd, Qm
+ A64: REV32 Vd.8H, Vn.8H
+
To be added.
To be added.
@@ -37684,7 +38092,11 @@
To be added.
- int16x8_t vrev64q_s16 (int16x8_t vec) A32: VREV64.16 Qd, Qm A64: REV64 Vd.8H, Vn.8H
+
+ int16x8_t vrev64q_s16 (int16x8_t vec)
+ A32: VREV64.16 Qd, Qm
+ A64: REV64 Vd.8H, Vn.8H
+
To be added.
To be added.
@@ -37712,7 +38124,11 @@
To be added.
- uint16x8_t vrev32q_u16 (uint16x8_t vec) A32: VREV32.16 Qd, Qm A64: REV32 Vd.8H, Vn.8H
+
+ uint16x8_t vrev32q_u16 (uint16x8_t vec)
+ A32: VREV32.16 Qd, Qm
+ A64: REV32 Vd.8H, Vn.8H
+
To be added.
To be added.
@@ -37740,7 +38156,11 @@
To be added.
- uint16x8_t vrev64q_u16 (uint16x8_t vec) A32: VREV64.16 Qd, Qm A64: REV64 Vd.8H, Vn.8H
+
+ uint16x8_t vrev64q_u16 (uint16x8_t vec)
+ A32: VREV64.16 Qd, Qm
+ A64: REV64 Vd.8H, Vn.8H
+
To be added.
To be added.
@@ -37768,7 +38188,11 @@
To be added.
- int16x4_t vrev32_s16 (int16x4_t vec) A32: VREV32.16 Dd, Dm A64: REV32 Vd.4H, Vn.4H
+
+ int16x4_t vrev32_s16 (int16x4_t vec)
+ A32: VREV32.16 Dd, Dm
+ A64: REV32 Vd.4H, Vn.4H
+
To be added.
To be added.
@@ -37796,7 +38220,11 @@
To be added.
- int16x4_t vrev64_s16 (int16x4_t vec) A32: VREV64.16 Dd, Dm A64: REV64 Vd.4H, Vn.4H
+
+ int16x4_t vrev64_s16 (int16x4_t vec)
+ A32: VREV64.16 Dd, Dm
+ A64: REV64 Vd.4H, Vn.4H
+
To be added.
To be added.
@@ -37824,7 +38252,11 @@
To be added.
- uint16x4_t vrev32_u16 (uint16x4_t vec) A32: VREV32.16 Dd, Dm A64: REV32 Vd.4H, Vn.4H
+
+ uint16x4_t vrev32_u16 (uint16x4_t vec)
+ A32: VREV32.16 Dd, Dm
+ A64: REV32 Vd.4H, Vn.4H
+
To be added.
To be added.
@@ -37852,7 +38284,11 @@
To be added.
- uint16x4_t vrev64_u16 (uint16x4_t vec) A32: VREV64.16 Dd, Dm A64: REV64 Vd.4H, Vn.4H
+
+ uint16x4_t vrev64_u16 (uint16x4_t vec)
+ A32: VREV64.16 Dd, Dm
+ A64: REV64 Vd.4H, Vn.4H
+
To be added.
To be added.
@@ -37880,7 +38316,11 @@
To be added.
- int32x4_t vrev64q_s32 (int32x4_t vec) A32: VREV64.32 Qd, Qm A64: REV64 Vd.4S, Vn.4S
+
+ int32x4_t vrev64q_s32 (int32x4_t vec)
+ A32: VREV64.32 Qd, Qm
+ A64: REV64 Vd.4S, Vn.4S
+
To be added.
To be added.
@@ -37908,7 +38348,11 @@
To be added.
- uint32x4_t vrev64q_u32 (uint32x4_t vec) A32: VREV64.32 Qd, Qm A64: REV64 Vd.4S, Vn.4S
+
+ uint32x4_t vrev64q_u32 (uint32x4_t vec)
+ A32: VREV64.32 Qd, Qm
+ A64: REV64 Vd.4S, Vn.4S
+
To be added.
To be added.
@@ -37936,7 +38380,11 @@
To be added.
- int32x2_t vrev64_s32 (int32x2_t vec) A32: VREV64.32 Dd, Dm A64: REV64 Vd.2S, Vn.2S
+
+ int32x2_t vrev64_s32 (int32x2_t vec)
+ A32: VREV64.32 Dd, Dm
+ A64: REV64 Vd.2S, Vn.2S
+
To be added.
To be added.
@@ -37964,7 +38412,11 @@
To be added.
- uint32x2_t vrev64_u32 (uint32x2_t vec) A32: VREV64.32 Dd, Dm A64: REV64 Vd.2S, Vn.2S
+
+ uint32x2_t vrev64_u32 (uint32x2_t vec)
+ A32: VREV64.32 Dd, Dm
+ A64: REV64 Vd.2S, Vn.2S
+
To be added.
To be added.
@@ -37992,7 +38444,11 @@
To be added.
- int8x16_t vrev16q_s8 (int8x16_t vec) A32: VREV16.8 Qd, Qm A64: REV16 Vd.16B, Vn.16B
+
+ int8x16_t vrev16q_s8 (int8x16_t vec)
+ A32: VREV16.8 Qd, Qm
+ A64: REV16 Vd.16B, Vn.16B
+
To be added.
To be added.
@@ -38020,7 +38476,11 @@
To be added.
- int8x16_t vrev32q_s8 (int8x16_t vec) A32: VREV32.8 Qd, Qm A64: REV32 Vd.16B, Vn.16B
+
+ int8x16_t vrev32q_s8 (int8x16_t vec)
+ A32: VREV32.8 Qd, Qm
+ A64: REV32 Vd.16B, Vn.16B
+
To be added.
To be added.
@@ -38048,7 +38508,11 @@
To be added.
- int8x16_t vrev64q_s8 (int8x16_t vec) A32: VREV64.8 Qd, Qm A64: REV64 Vd.16B, Vn.16B
+
+ int8x16_t vrev64q_s8 (int8x16_t vec)
+ A32: VREV64.8 Qd, Qm
+ A64: REV64 Vd.16B, Vn.16B
+
To be added.
To be added.
@@ -38076,7 +38540,11 @@
To be added.
- uint8x16_t vrev16q_u8 (uint8x16_t vec) A32: VREV16.8 Qd, Qm A64: REV16 Vd.16B, Vn.16B
+
+ uint8x16_t vrev16q_u8 (uint8x16_t vec)
+ A32: VREV16.8 Qd, Qm
+ A64: REV16 Vd.16B, Vn.16B
+
To be added.
To be added.
@@ -38104,7 +38572,11 @@
To be added.
- uint8x16_t vrev32q_u8 (uint8x16_t vec) A32: VREV32.8 Qd, Qm A64: REV32 Vd.16B, Vn.16B
+
+ uint8x16_t vrev32q_u8 (uint8x16_t vec)
+ A32: VREV32.8 Qd, Qm
+ A64: REV32 Vd.16B, Vn.16B
+
To be added.
To be added.
@@ -38132,7 +38604,11 @@
To be added.
- uint8x16_t vrev64q_u8 (uint8x16_t vec) A32: VREV64.8 Qd, Qm A64: REV64 Vd.16B, Vn.16B
+
+ uint8x16_t vrev64q_u8 (uint8x16_t vec)
+ A32: VREV64.8 Qd, Qm
+ A64: REV64 Vd.16B, Vn.16B
+
To be added.
To be added.
@@ -38160,7 +38636,11 @@
To be added.
- int8x8_t vrev16_s8 (int8x8_t vec) A32: VREV16.8 Dd, Dm A64: REV16 Vd.8B, Vn.8B
+
+ int8x8_t vrev16_s8 (int8x8_t vec)
+ A32: VREV16.8 Dd, Dm
+ A64: REV16 Vd.8B, Vn.8B
+
To be added.
To be added.
@@ -38188,7 +38668,11 @@
To be added.
- int8x8_t vrev32_s8 (int8x8_t vec) A32: VREV32.8 Dd, Dm A64: REV32 Vd.8B, Vn.8B
+
+ int8x8_t vrev32_s8 (int8x8_t vec)
+ A32: VREV32.8 Dd, Dm
+ A64: REV32 Vd.8B, Vn.8B
+
To be added.
To be added.
@@ -38216,7 +38700,11 @@
To be added.
- int8x8_t vrev64_s8 (int8x8_t vec) A32: VREV64.8 Dd, Dm A64: REV64 Vd.8B, Vn.8B
+
+ int8x8_t vrev64_s8 (int8x8_t vec)
+ A32: VREV64.8 Dd, Dm
+ A64: REV64 Vd.8B, Vn.8B
+
To be added.
To be added.
@@ -38244,7 +38732,11 @@
To be added.
- uint8x8_t vrev16_u8 (uint8x8_t vec) A32: VREV16.8 Dd, Dm A64: REV16 Vd.8B, Vn.8B
+
+ uint8x8_t vrev16_u8 (uint8x8_t vec)
+ A32: VREV16.8 Dd, Dm
+ A64: REV16 Vd.8B, Vn.8B
+
To be added.
To be added.
@@ -38272,7 +38764,11 @@
To be added.
- uint8x8_t vrev32_u8 (uint8x8_t vec) A32: VREV32.8 Dd, Dm A64: REV32 Vd.8B, Vn.8B
+
+ uint8x8_t vrev32_u8 (uint8x8_t vec)
+ A32: VREV32.8 Dd, Dm
+ A64: REV32 Vd.8B, Vn.8B
+
To be added.
To be added.
@@ -38300,7 +38796,11 @@
To be added.
- uint8x8_t vrev64_u8 (uint8x8_t vec) A32: VREV64.8 Dd, Dm A64: REV64 Vd.8B, Vn.8B
+
+ uint8x8_t vrev64_u8 (uint8x8_t vec)
+ A32: VREV64.8 Dd, Dm
+ A64: REV64 Vd.8B, Vn.8B
+
To be added.
To be added.
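
The REV entries above are the usual endianness-swap primitives; a short sketch using the byte-within-word variant (assumes a little-endian NEON target):

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        // Byte-swap four 32-bit words at once (big <-> little endian).
        uint32_t in[4] = { 0x11223344u, 0xAABBCCDDu, 0x01020304u, 0xDEADBEEFu };
        uint8x16_t v = vreinterpretq_u8_u32(vld1q_u32(in));
        v = vrev32q_u8(v);                  // reverse bytes inside each 32-bit lane
        uint32_t out[4];
        vst1q_u32(out, vreinterpretq_u32_u8(v));
        printf("%08x\n", out[0]);           // prints 44332211
        return 0;
    }
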
@@ -40067,7 +40567,11 @@
To be added.
To be added.
To be added.
- uint8x16_t vsliq_n_u8(uint8x16_t a, uint8x16_t b, __builtin_constant_p(n)) A32: VSLI.8 Qd, Qm, #n A64: SLI Vd.16B, Vn.16B, #n
+
+ uint8x16_t vsliq_n_u8(uint8x16_t a, uint8x16_t b, const int n)
+ A32: VSLI.8 Qd, Qm, #n
+ A64: SLI Vd.16B, Vn.16B, #n
+
To be added.
To be added.
@@ -40106,7 +40610,11 @@
To be added.
To be added.
To be added.
- int16x8_t vsliq_n_s16(int16x8_t a, int16x8_t b, __builtin_constant_p(n)) A32: VSLI.16 Qd, Qm, #n A64: SLI Vd.8H, Vn.8H, #n
+
+ int16x8_t vsliq_n_s16(int16x8_t a, int16x8_t b, const int n)
+ A32: VSLI.16 Qd, Qm, #n
+ A64: SLI Vd.8H, Vn.8H, #n
+
To be added.
To be added.
@@ -40145,7 +40653,11 @@
To be added.
To be added.
To be added.
- int32x4_t vsliq_n_s32(int32x4_t a, int32x4_t b, __builtin_constant_p(n)) A32: VSLI.32 Qd, Qm, #n A64: SLI Vd.4S, Vn.4S, #n
+
+ int32x4_t vsliq_n_s32(int32x4_t a, int32x4_t b, const int n)
+ A32: VSLI.32 Qd, Qm, #n
+ A64: SLI Vd.4S, Vn.4S, #n
+
To be added.
To be added.
@@ -40184,7 +40696,11 @@
To be added.
To be added.
To be added.
- int64x2_t vsliq_n_s64(int64x2_t a, int64x2_t b, __builtin_constant_p(n)) A32: VSLI.64 Qd, Qm, #n A64: SLI Vd.2D, Vn.2D, #n
+
+ int64x2_t vsliq_n_s64(int64x2_t a, int64x2_t b, const int n)
+ A32: VSLI.64 Qd, Qm, #n
+ A64: SLI Vd.2D, Vn.2D, #n
+
To be added.
To be added.
@@ -40223,7 +40739,11 @@
To be added.
To be added.
To be added.
- int8x16_t vsliq_n_s8(int8x16_t a, int8x16_t b, __builtin_constant_p(n)) A32: VSLI.8 Qd, Qm, #n A64: SLI Vd.16B, Vn.16B, #n
+
+ int8x16_t vsliq_n_s8(int8x16_t a, int8x16_t b, const int n)
+ A32: VSLI.8 Qd, Qm, #n
+ A64: SLI Vd.16B, Vn.16B, #n
+
To be added.
To be added.
@@ -40262,7 +40782,11 @@
To be added.
To be added.
To be added.
- uint16x8_t vsliq_n_u16(uint16x8_t a, uint16x8_t b, __builtin_constant_p(n)) A32: VSLI.16 Qd, Qm, #n A64: SLI Vd.8H, Vn.8H, #n
+
+ uint16x8_t vsliq_n_u16(uint16x8_t a, uint16x8_t b, const int n)
+ A32: VSLI.16 Qd, Qm, #n
+ A64: SLI Vd.8H, Vn.8H, #n
+
To be added.
To be added.
@@ -40301,7 +40825,11 @@
To be added.
To be added.
To be added.
- uint32x4_t vsliq_n_u32(uint32x4_t a, uint32x4_t b, __builtin_constant_p(n)) A32: VSLI.32 Qd, Qm, #n A64: SLI Vd.4S, Vn.4S, #n
+
+ uint32x4_t vsliq_n_u32(uint32x4_t a, uint32x4_t b, const int n)
+ A32: VSLI.32 Qd, Qm, #n
+ A64: SLI Vd.4S, Vn.4S, #n
+
To be added.
To be added.
@@ -40340,7 +40868,11 @@
To be added.
To be added.
To be added.
- uint64x2_t vsliq_n_u64(uint64x2_t a, uint64x2_t b, __builtin_constant_p(n)) A32: VSLI.64 Qd, Qm, #n A64: SLI Vd.2D, Vn.2D, #n
+
+ uint64x2_t vsliq_n_u64(uint64x2_t a, uint64x2_t b, const int n)
+ A32: VSLI.64 Qd, Qm, #n
+ A64: SLI Vd.2D, Vn.2D, #n
+
To be added.
To be added.
@@ -40379,7 +40911,11 @@
To be added.
To be added.
To be added.
- uint8x8_t vsli_n_u8(uint8x8_t a, uint8x8_t b, __builtin_constant_p(n)) A32: VSLI.8 Dd, Dm, #n A64: SLI Vd.8B, Vn.8B, #n
+
+ uint8x8_t vsli_n_u8(uint8x8_t a, uint8x8_t b, const int n)
+ A32: VSLI.8 Dd, Dm, #n
+ A64: SLI Vd.8B, Vn.8B, #n
+
To be added.
To be added.
@@ -40418,7 +40954,11 @@
To be added.
To be added.
To be added.
- int16x4_t vsli_n_s16(int16x4_t a, int16x4_t b, __builtin_constant_p(n)) A32: VSLI.16 Dd, Dm, #n A64: SLI Vd.4H, Vn.4H, #n
+
+ int16x4_t vsli_n_s16(int16x4_t a, int16x4_t b, const int n)
+ A32: VSLI.16 Dd, Dm, #n
+ A64: SLI Vd.4H, Vn.4H, #n
+
To be added.
To be added.
@@ -40457,7 +40997,11 @@
To be added.
To be added.
To be added.
- int32x2_t vsli_n_s32(int32x2_t a, int32x2_t b, __builtin_constant_p(n)) A32: VSLI.32 Dd, Dm, #n A64: SLI Vd.2S, Vn.2S, #n
+
+ int32x2_t vsli_n_s32(int32x2_t a, int32x2_t b, const int n)
+ A32: VSLI.32 Dd, Dm, #n
+ A64: SLI Vd.2S, Vn.2S, #n
+
To be added.
To be added.
@@ -40496,7 +41040,11 @@
To be added.
To be added.
To be added.
- int8x8_t vsli_n_s8(int8x8_t a, int8x8_t b, __builtin_constant_p(n)) A32: VSLI.8 Dd, Dm, #n A64: SLI Vd.8B, Vn.8B, #n
+
+ int8x8_t vsli_n_s8(int8x8_t a, int8x8_t b, const int n)
+ A32: VSLI.8 Dd, Dm, #n
+ A64: SLI Vd.8B, Vn.8B, #n
+
To be added.
To be added.
@@ -40535,7 +41083,11 @@
To be added.
To be added.
To be added.
- uint16x4_t vsli_n_u16(uint16x4_t a, uint16x4_t b, __builtin_constant_p(n)) A32: VSLI.16 Dd, Dm, #n A64: SLI Vd.4H, Vn.4H, #n
+
+ uint16x4_t vsli_n_u16(uint16x4_t a, uint16x4_t b, const int n)
+ A32: VSLI.16 Dd, Dm, #n
+ A64: SLI Vd.4H, Vn.4H, #n
+
To be added.
To be added.
@@ -40574,7 +41126,11 @@
To be added.
To be added.
To be added.
- uint32x2_t vsli_n_u32(uint32x2_t a, uint32x2_t b, __builtin_constant_p(n)) A32: VSLI.32 Dd, Dm, #n A64: SLI Vd.2S, Vn.2S, #n
+
+ uint32x2_t vsli_n_u32(uint32x2_t a, uint32x2_t b, const int n)
+ A32: VSLI.32 Dd, Dm, #n
+ A64: SLI Vd.2S, Vn.2S, #n
+
To be added.
To be added.
@@ -40613,7 +41169,11 @@
To be added.
To be added.
To be added.
- int64_t vslid_n_s64(int64_t a, int64_t b, __builtin_constant_p(n)) A32: VSLI.64 Dd, Dm, #n A64: SLI Dd, Dn, #n
+
+ int64_t vslid_n_s64(int64_t a, int64_t b, const int n)
+ A32: VSLI.64 Dd, Dm, #n
+ A64: SLI Dd, Dn, #n
+
To be added.
To be added.
@@ -40652,7 +41212,11 @@
To be added.
To be added.
To be added.
- uint64_t vslid_n_u64(uint64_t a, uint64_t b, __builtin_constant_p(n)) A32: VSLI.64 Dd, Dm, #n A64: SLI Dd, Dn, #n
+
+ uint64_t vslid_n_u64(uint64_t a, uint64_t b, const int n)
+ A32: VSLI.64 Dd, Dm, #n
+ A64: SLI Dd, Dn, #n
+
To be added.
To be added.
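
SLI keeps the low n bits of the destination and deposits the shifted source above them, which makes it handy for bit-field packing; a minimal sketch (assumes <arm_neon.h>):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        // Pack two nibble vectors into one byte vector:
        // result = (hi << 4) | (lo & 0x0F) per lane.
        uint8x8_t lo = vdup_n_u8(0x03);
        uint8x8_t hi = vdup_n_u8(0x0A);
        uint8x8_t packed = vsli_n_u8(lo, hi, 4);
        printf("%02x\n", vget_lane_u8(packed, 0));  // prints a3
        return 0;
    }
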
@@ -45560,7 +46124,11 @@
To be added.
To be added.
To be added.
- int64_t vsrid_n_s64(int64_t a, int64_t b, __builtin_constant_p(n)) A32: VSRI.64 Dd, Dm, #n A64: SRI Dd, Dn, #n
+
+ int64_t vsrid_n_s64(int64_t a, int64_t b, const int n)
+ A32: VSRI.64 Dd, Dm, #n
+ A64: SRI Dd, Dn, #n
+
To be added.
To be added.
@@ -45599,7 +46167,11 @@
To be added.
To be added.
To be added.
- uint64_t vsrid_n_u64(uint64_t a, uint64_t b, __builtin_constant_p(n)) A32: VSRI.64 Dd, Dm, #n A64: SRI Dd, Dn, #n
+
+ uint64_t vsrid_n_u64(uint64_t a, uint64_t b, const int n)
+ A32: VSRI.64 Dd, Dm, #n
+ A64: SRI Dd, Dn, #n
+
To be added.
To be added.
@@ -57223,7 +57795,10 @@
To be added.
To be added.
- uint8x8_t vqtbl4q_u8(uint8x16x4_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B
+
+ uint8x8_t vqtbl4_u8(uint8x16x4_t t, uint8x8_t idx)
+ A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B
+
To be added.
To be added.
@@ -57250,7 +57825,10 @@
To be added.
To be added.
- uint8x8_t vqtbl3q_u8(uint8x16x3_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B
+
+ uint8x8_t vqtbl3_u8(uint8x16x3_t t, uint8x8_t idx)
+ A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B
+
To be added.
To be added.
@@ -57277,7 +57855,10 @@
To be added.
To be added.
- uint8x8_t vqtbl2q_u8(uint8x16x2_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B
+
+ uint8x8_t vqtbl2_u8(uint8x16x2_t t, uint8x8_t idx)
+ A64: TBL Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B
+
To be added.
To be added.
@@ -57304,7 +57885,10 @@
To be added.
To be added.
- int8x8_t vqtbl4q_u8(int8x16x4_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B
+
+ int8x8_t vqtbl4_s8(int8x16x4_t t, uint8x8_t idx)
+ A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B
+
To be added.
To be added.
@@ -57331,7 +57915,10 @@
To be added.
To be added.
- int8x8_t vqtbl3q_u8(int8x16x3_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B
+
+ int8x8_t vqtbl3_s8(int8x16x3_t t, uint8x8_t idx)
+ A64: TBL Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B
+
To be added.
To be added.
@@ -57358,7 +57945,10 @@
To be added.
To be added.
- int8x8_t vqtbl2q_u8(int8x16x2_t t, uint8x8_t idx) A64: TBL Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B
+
+ int8x8_t vqtbl2_s8(int8x16x2_t t, uint8x8_t idx)
+ A64: TBL Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B
+
To be added.
To be added.
@@ -57423,7 +58013,10 @@
To be added.
To be added.
To be added.
- uint8x8_t vqtbx4q_u8(uint8x16x4_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B
+
+ uint8x8_t vqtbx4_u8(uint8x8_t r, uint8x16x4_t t, uint8x8_t idx)
+ A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B
+
To be added.
To be added.
@@ -57452,7 +58045,10 @@
To be added.
To be added.
To be added.
- uint8x8_t vqtbx3q_u8(uint8x16x3_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B
+
+ uint8x8_t vqtbx3_u8(uint8x8_t r, uint8x16x3_t t, uint8x8_t idx)
+ A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B
+
To be added.
To be added.
@@ -57481,7 +58077,10 @@
To be added.
To be added.
To be added.
- uint8x8_t vqtbx2q_u8(uint8x16x2_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B
+
+ uint8x8_t vqtbx2_u8(uint8x8_t r, uint8x16x2_t t, uint8x8_t idx)
+ A64: TBX Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B
+
To be added.
To be added.
@@ -57546,7 +58145,10 @@
To be added.
To be added.
To be added.
- int8x8_t vqtbx4q_u8(int8x16x4_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B
+
+ int8x8_t vqtbx4_s8(int8x8_t r, int8x16x4_t t, uint8x8_t idx)
+ A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B}, Vm.8B
+
To be added.
To be added.
@@ -57575,7 +58177,10 @@
To be added.
To be added.
To be added.
- int8x8_t vqtbx3q_u8(int8x16x3_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B
+
+ int8x8_t vqtbx3_s8(int8x8_t r, int8x16x3_t t, uint8x8_t idx)
+ A64: TBX Vd.8B, {Vn.16B, Vn+1.16B, Vn+2.16B}, Vm.8B
+
To be added.
To be added.
@@ -57604,7 +58209,10 @@
To be added.
To be added.
To be added.
- int8x8_t vqtbx2q_u8(int8x16x2_t t, uint8x8_t idx) A64: TBX Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B
+
+ int8x8_t vqtbx2_s8(int8x8_t r, int8x16x2_t t, uint8x8_t idx)
+ A64: TBX Vd.8B, {Vn.16B, Vn+1.16B}, Vm.8B
+
To be added.
To be added.
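
For the TBL/TBX groups above, a small sketch of the 32-byte table form (assumes an AArch64 toolchain; the signatures use the ACLE names as corrected above):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        // TBL over a 2-register (32-byte) table: in-range indexes select a
        // table byte, out-of-range indexes (>= 32) produce 0.
        uint8_t bytes[32];
        for (int i = 0; i < 32; i++) bytes[i] = (uint8_t)(i * 2);
        uint8x16x2_t tbl = { { vld1q_u8(bytes), vld1q_u8(bytes + 16) } };
        uint8_t iv[8] = { 0, 5, 17, 31, 32, 200, 1, 16 };
        uint8x8_t r = vqtbl2_u8(tbl, vld1_u8(iv));
        printf("%u %u %u\n", vget_lane_u8(r, 3), vget_lane_u8(r, 4),
               vget_lane_u8(r, 5));         // prints 62 0 0
        return 0;
    }
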
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512BW+VL.xml b/xml/System.Runtime.Intrinsics.X86/Avx512BW+VL.xml
index e35c030c719..be7d2fbb86e 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512BW+VL.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512BW+VL.xml
@@ -40,7 +40,10 @@
To be added.
To be added.
- __m128i _mm_cmpgt_epu8 (__m128i a, __m128i b) VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(6)
+
+ __m128i _mm_cmpgt_epu8 (__m128i a, __m128i b)
+ VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(6)
+
To be added.
To be added.
@@ -67,7 +70,10 @@
To be added.
To be added.
- __m128i _mm_cmpgt_epu16 (__m128i a, __m128i b) VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(6)
+
+ __m128i _mm_cmpgt_epu16 (__m128i a, __m128i b)
+ VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(6)
+
To be added.
To be added.
@@ -94,7 +100,10 @@
To be added.
To be added.
- __m256i _mm256_cmpgt_epu8 (__m256i a, __m256i b) VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(6)
+
+ __m256i _mm256_cmpgt_epu8 (__m256i a, __m256i b)
+ VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(6)
+
To be added.
To be added.
@@ -121,7 +130,10 @@
To be added.
To be added.
- __m256i _mm256_cmpgt_epu16 (__m256i a, __m256i b) VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(6)
+
+ __m256i _mm256_cmpgt_epu16 (__m256i a, __m256i b)
+ VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(6)
+
To be added.
To be added.
@@ -148,7 +160,10 @@
To be added.
To be added.
- __m128i _mm_cmpge_epu8 (__m128i a, __m128i b) VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
+ __m128i _mm_cmpge_epu8 (__m128i a, __m128i b)
+ VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
To be added.
To be added.
@@ -175,7 +190,10 @@
To be added.
To be added.
- __m128i _mm_cmpge_epi16 (__m128i a, __m128i b) VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
+ __m128i _mm_cmpge_epi16 (__m128i a, __m128i b)
+ VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
To be added.
To be added.
@@ -202,7 +220,10 @@
To be added.
To be added.
- __m128i _mm_cmpge_epi8 (__m128i a, __m128i b) VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
+ __m128i _mm_cmpge_epi8 (__m128i a, __m128i b)
+ VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
To be added.
To be added.
@@ -229,7 +250,10 @@
To be added.
To be added.
- __m128i _mm_cmpge_epu16 (__m128i a, __m128i b) VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
+ __m128i _mm_cmpge_epu16 (__m128i a, __m128i b)
+ VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
To be added.
To be added.
@@ -256,7 +280,10 @@
To be added.
To be added.
- __m256i _mm256_cmpge_epu8 (__m256i a, __m256i b) VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
+ __m256i _mm256_cmpge_epu8 (__m256i a, __m256i b)
+ VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
To be added.
To be added.
@@ -283,7 +310,10 @@
To be added.
To be added.
- __m256i _mm256_cmpge_epi16 (__m256i a, __m256i b) VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
+ __m256i _mm256_cmpge_epi16 (__m256i a, __m256i b)
+ VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
To be added.
To be added.
@@ -310,7 +340,10 @@
To be added.
To be added.
- __m256i _mm256_cmpge_epi8 (__m256i a, __m256i b) VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
+ __m256i _mm256_cmpge_epi8 (__m256i a, __m256i b)
+ VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
To be added.
To be added.
@@ -337,7 +370,10 @@
To be added.
To be added.
- __m256i _mm256_cmpge_epu16 (__m256i a, __m256i b) VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
+ __m256i _mm256_cmpge_epu16 (__m256i a, __m256i b)
+ VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
To be added.
To be added.
@@ -364,7 +400,10 @@
To be added.
To be added.
- __m128i _mm_cmplt_epu8 (__m128i a, __m128i b) VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
+ __m128i _mm_cmplt_epu8 (__m128i a, __m128i b)
+ VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
To be added.
To be added.
@@ -391,7 +430,10 @@
To be added.
To be added.
- __m128i _mm_cmplt_epi16 (__m128i a, __m128i b) VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
+ __m128i _mm_cmplt_epi16 (__m128i a, __m128i b)
+ VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
To be added.
To be added.
@@ -418,7 +460,10 @@
To be added.
To be added.
- __m128i _mm_cmplt_epi8 (__m128i a, __m128i b) VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
+ __m128i _mm_cmplt_epi8 (__m128i a, __m128i b)
+ VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
To be added.
To be added.
@@ -445,7 +490,10 @@
To be added.
To be added.
- __m128i _mm_cmplt_epu16 (__m128i a, __m128i b) VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
+ __m128i _mm_cmplt_epu16 (__m128i a, __m128i b)
+ VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
To be added.
To be added.
@@ -472,7 +520,10 @@
To be added.
To be added.
- __m256i _mm256_cmplt_epu8 (__m256i a, __m256i b) VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
+ __m256i _mm256_cmplt_epu8 (__m256i a, __m256i b)
+ VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
To be added.
To be added.
@@ -499,7 +550,10 @@
To be added.
To be added.
- __m256i _mm256_cmplt_epi16 (__m256i a, __m256i b) VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
+ __m256i _mm256_cmplt_epi16 (__m256i a, __m256i b)
+ VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
To be added.
To be added.
@@ -526,7 +580,10 @@
To be added.
To be added.
- __m256i _mm256_cmplt_epi8 (__m256i a, __m256i b) VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
+ __m256i _mm256_cmplt_epi8 (__m256i a, __m256i b)
+ VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
To be added.
To be added.
@@ -553,7 +610,10 @@
To be added.
To be added.
- __m256i _mm256_cmplt_epu16 (__m256i a, __m256i b) VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
+ __m256i _mm256_cmplt_epu16 (__m256i a, __m256i b)
+ VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
To be added.
To be added.
@@ -580,7 +640,10 @@
To be added.
To be added.
- __m128i _mm_cmple_epu8 (__m128i a, __m128i b) VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
+ __m128i _mm_cmple_epu8 (__m128i a, __m128i b)
+ VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
To be added.
To be added.
@@ -607,7 +670,10 @@
To be added.
To be added.
- __m128i _mm_cmple_epi16 (__m128i a, __m128i b) VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
+ __m128i _mm_cmple_epi16 (__m128i a, __m128i b)
+ VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
To be added.
To be added.
@@ -634,7 +700,10 @@
To be added.
To be added.
- __m128i _mm_cmple_epi8 (__m128i a, __m128i b) VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
+ __m128i _mm_cmple_epi8 (__m128i a, __m128i b)
+ VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
To be added.
To be added.
@@ -661,7 +730,10 @@
To be added.
To be added.
- __m128i _mm_cmple_epu16 (__m128i a, __m128i b) VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
+ __m128i _mm_cmple_epu16 (__m128i a, __m128i b)
+ VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
To be added.
To be added.
@@ -688,7 +760,10 @@
To be added.
To be added.
- __m256i _mm256_cmple_epu8 (__m256i a, __m256i b) VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
+ __m256i _mm256_cmple_epu8 (__m256i a, __m256i b)
+ VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
To be added.
To be added.
@@ -715,7 +790,10 @@
To be added.
To be added.
- __m256i _mm256_cmple_epi16 (__m256i a, __m256i b) VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
+ __m256i _mm256_cmple_epi16 (__m256i a, __m256i b)
+ VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
To be added.
To be added.
@@ -742,7 +820,10 @@
To be added.
To be added.
- __m256i _mm256_cmple_epi8 (__m256i a, __m256i b) VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
+ __m256i _mm256_cmple_epi8 (__m256i a, __m256i b)
+ VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
To be added.
To be added.
@@ -769,7 +850,10 @@
To be added.
To be added.
- __m256i _mm256_cmple_epu16 (__m256i a, __m256i b) VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
+ __m256i _mm256_cmple_epu16 (__m256i a, __m256i b)
+ VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
To be added.
To be added.
@@ -796,7 +880,10 @@
To be added.
To be added.
- __m128i _mm_cmpne_epu8 (__m128i a, __m128i b) VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
+ __m128i _mm_cmpne_epu8 (__m128i a, __m128i b)
+ VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
To be added.
To be added.
@@ -823,7 +910,10 @@
To be added.
To be added.
- __m128i _mm_cmpne_epi16 (__m128i a, __m128i b) VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
+ __m128i _mm_cmpne_epi16 (__m128i a, __m128i b)
+ VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
To be added.
To be added.
@@ -850,7 +940,10 @@
To be added.
To be added.
- __m128i _mm_cmpne_epi8 (__m128i a, __m128i b) VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
+ __m128i _mm_cmpne_epi8 (__m128i a, __m128i b)
+ VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
To be added.
To be added.
@@ -877,7 +970,10 @@
To be added.
To be added.
- __m128i _mm_cmpne_epu16 (__m128i a, __m128i b) VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
+ __m128i _mm_cmpne_epu16 (__m128i a, __m128i b)
+ VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
To be added.
To be added.
@@ -904,7 +1000,10 @@
To be added.
To be added.
- __m256i _mm256_cmpne_epu8 (__m256i a, __m256i b) VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
+ __m256i _mm256_cmpne_epu8 (__m256i a, __m256i b)
+ VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
To be added.
To be added.
@@ -931,7 +1030,10 @@
To be added.
To be added.
- __m256i _mm256_cmpne_epi16 (__m256i a, __m256i b) VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
+ __m256i _mm256_cmpne_epi16 (__m256i a, __m256i b)
+ VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
To be added.
To be added.
@@ -958,7 +1060,10 @@
To be added.
To be added.
- __m256i _mm256_cmpne_epi8 (__m256i a, __m256i b) VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
+ __m256i _mm256_cmpne_epi8 (__m256i a, __m256i b)
+ VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
To be added.
To be added.
@@ -985,7 +1090,10 @@
To be added.
To be added.
- __m256i _mm256_cmpne_epu16 (__m256i a, __m256i b) VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
+ __m256i _mm256_cmpne_epu16 (__m256i a, __m256i b)
+ VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
To be added.
To be added.
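
One note on reading these comparison entries: the vector-returning signatures mirror the managed API, while the C compiler intrinsics are the `_mask`-suffixed forms that return the k-register value directly. A minimal sketch (assumes GCC or Clang with -mavx512bw -mavx512vl):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i a = _mm_setr_epi8(0, (char)200, 50, 100, 0, 0, 0, 0,
                                  0, 0, 0, 0, 0, 0, 0, 0);
        __m128i b = _mm_setr_epi8(1, 100, 50, 99, 0, 0, 0, 0,
                                  0, 0, 0, 0, 0, 0, 0, 0);
        // VPCMPUB with imm8(6): unsigned greater-than, one mask bit per byte.
        __mmask16 m = _mm_cmpgt_epu8_mask(a, b);
        printf("%04x\n", (unsigned)m);      // prints 000a (lanes 1 and 3)
        return 0;
    }
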
@@ -1010,7 +1118,10 @@
To be added.
- __m128i _mm_cvtepi16_epi8 (__m128i a) VPMOVWB xmm1/m64 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi16_epi8 (__m128i a)
+ VPMOVWB xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1035,7 +1146,10 @@
To be added.
- __m128i _mm_cvtepi16_epi8 (__m128i a) VPMOVWB xmm1/m64 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi16_epi8 (__m128i a)
+ VPMOVWB xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1060,7 +1174,10 @@
To be added.
- __m128i _mm256_cvtepi16_epi8 (__m256i a) VPMOVWB xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi16_epi8 (__m256i a)
+ VPMOVWB xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1085,7 +1202,10 @@
To be added.
- __m128i _mm256_cvtepi16_epi8 (__m256i a) VPMOVWB xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi16_epi8 (__m256i a)
+ VPMOVWB xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1110,7 +1230,10 @@
To be added.
- __m128i _mm_cvtusepi16_epi8 (__m128i a) VPMOVUWB xmm1/m64 {k1}{z}, xmm2
+
+ __m128i _mm_cvtusepi16_epi8 (__m128i a)
+ VPMOVUWB xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1135,7 +1258,10 @@
To be added.
- __m128i _mm256_cvtusepi16_epi8 (__m256i a) VPMOVUWB xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtusepi16_epi8 (__m256i a)
+ VPMOVUWB xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1160,7 +1286,10 @@
To be added.
- __m128i _mm_cvtepi16_epi8 (__m128i a) VPMOVWB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm_cvtepi16_epi8 (__m128i a)
+ VPMOVWB xmm1/m64 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1185,7 +1314,10 @@
To be added.
- __m128i _mm_cvtepi16_epi8 (__m128i a) VPMOVWB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm_cvtepi16_epi8 (__m128i a)
+ VPMOVWB xmm1/m64 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1210,7 +1342,10 @@
To be added.
- __m128i _mm256_cvtepi16_epi8 (__m256i a) VPMOVWB xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtepi16_epi8 (__m256i a)
+ VPMOVWB xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1235,7 +1370,10 @@
To be added.
- __m128i _mm256_cvtepi16_epi8 (__m256i a) VPMOVWB xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtepi16_epi8 (__m256i a)
+ VPMOVWB xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1260,7 +1398,10 @@
To be added.
- __m128i _mm_cvtsepi16_epi8 (__m128i a) VPMOVSWB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm_cvtsepi16_epi8 (__m128i a)
+ VPMOVSWB xmm1/m64 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1285,7 +1426,10 @@
To be added.
- __m128i _mm256_cvtsepi16_epi8 (__m256i a) VPMOVSWB xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtsepi16_epi8 (__m256i a)
+ VPMOVSWB xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
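
The VPMOVWB family above narrows without saturation, simply truncating each word to its low byte (the VPMOVSWB/VPMOVUWB forms saturate instead). A short sketch (assumes -mavx512bw -mavx512vl):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        // Truncate sixteen 16-bit elements to their low bytes.
        __m256i w = _mm256_set1_epi16(0x1234);
        __m128i b = _mm256_cvtepi16_epi8(w);
        printf("%02x\n", (unsigned char)_mm_extract_epi8(b, 0));  // prints 34
        return 0;
    }
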
@@ -1333,7 +1477,10 @@
To be added.
To be added.
- __m256i _mm256_permutevar16x16_epi16 (__m256i a, __m256i b) VPERMW ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_permutevar16x16_epi16 (__m256i a, __m256i b)
+ VPERMW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -1360,7 +1507,10 @@
To be added.
To be added.
- __m256i _mm256_permutevar16x16_epi16 (__m256i a, __m256i b) VPERMW ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_permutevar16x16_epi16 (__m256i a, __m256i b)
+ VPERMW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -1389,7 +1539,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_permutex2var_epi16 (__m256i a, __m256i idx, __m256i b) VPERMI2W ymm1 {k1}{z}, ymm2, ymm3/m256 VPERMT2W ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_permutex2var_epi16 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2W ymm1 {k1}{z}, ymm2, ymm3/m256
+ VPERMT2W ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -1418,7 +1571,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_permutex2var_epi16 (__m256i a, __m256i idx, __m256i b) VPERMI2W ymm1 {k1}{z}, ymm2, ymm3/m256 VPERMT2W ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_permutex2var_epi16 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2W ymm1 {k1}{z}, ymm2, ymm3/m256
+ VPERMT2W ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -1445,7 +1601,10 @@
To be added.
To be added.
- __m128i _mm_permutevar8x16_epi16 (__m128i a, __m128i b) VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_permutevar8x16_epi16 (__m128i a, __m128i b)
+ VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -1472,7 +1631,10 @@
To be added.
To be added.
- __m128i _mm_permutevar8x16_epi16 (__m128i a, __m128i b) VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_permutevar8x16_epi16 (__m128i a, __m128i b)
+ VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -1501,7 +1663,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_permutex2var_epi16 (__m128i a, __m128i idx, __m128i b) VPERMI2W xmm1 {k1}{z}, xmm2, xmm3/m128 VPERMT2W xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_permutex2var_epi16 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2W xmm1 {k1}{z}, xmm2, xmm3/m128
+ VPERMT2W xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -1530,7 +1695,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_permutex2var_epi16 (__m128i a, __m128i idx, __m128i b) VPERMI2W xmm1 {k1}{z}, xmm2, xmm3/m128 VPERMT2W xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_permutex2var_epi16 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2W xmm1 {k1}{z}, xmm2, xmm3/m128
+ VPERMT2W xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
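
For the two-source permutes, the index word selects both the source (bit 3 for the 8-lane form) and the lane within it. A minimal sketch (assumes -mavx512bw -mavx512vl):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i a   = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
        __m128i b   = _mm_setr_epi16(100, 101, 102, 103, 104, 105, 106, 107);
        __m128i idx = _mm_setr_epi16(7, 8, 0, 15, 1, 9, 2, 10);
        // Indexes 0..7 pick from a, 8..15 pick from b.
        __m128i r = _mm_permutex2var_epi16(a, idx, b);
        printf("%d %d %d %d\n",
               _mm_extract_epi16(r, 0), _mm_extract_epi16(r, 1),
               _mm_extract_epi16(r, 2), _mm_extract_epi16(r, 3));  // 7 100 0 107
        return 0;
    }
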
@@ -1557,7 +1725,10 @@
To be added.
To be added.
- __m128i _mm_sllv_epi16 (__m128i a, __m128i count) VPSLLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_sllv_epi16 (__m128i a, __m128i count)
+ VPSLLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -1584,7 +1755,10 @@
To be added.
To be added.
- __m128i _mm_sllv_epi16 (__m128i a, __m128i count) VPSLLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_sllv_epi16 (__m128i a, __m128i count)
+ VPSLLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -1611,7 +1785,10 @@
To be added.
To be added.
- __m256i _mm256_sllv_epi16 (__m256i a, __m256i count) VPSLLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_sllv_epi16 (__m256i a, __m256i count)
+ VPSLLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -1638,7 +1815,10 @@
To be added.
To be added.
- __m256i _mm256_sllv_epi16 (__m256i a, __m256i count) VPSLLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_sllv_epi16 (__m256i a, __m256i count)
+ VPSLLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -1665,7 +1845,10 @@
To be added.
To be added.
- __m128i _mm_srav_epi16 (__m128i a, __m128i count) VPSRAVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_srav_epi16 (__m128i a, __m128i count)
+ VPSRAVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -1692,7 +1875,10 @@
To be added.
To be added.
- __m256i _mm256_srav_epi16 (__m256i a, __m256i count) VPSRAVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_srav_epi16 (__m256i a, __m256i count)
+ VPSRAVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -1719,7 +1905,10 @@
To be added.
To be added.
- __m128i _mm_srlv_epi16 (__m128i a, __m128i count) VPSRLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_srlv_epi16 (__m128i a, __m128i count)
+ VPSRLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -1746,7 +1935,10 @@
To be added.
To be added.
- __m128i _mm_srlv_epi16 (__m128i a, __m128i count) VPSRLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_srlv_epi16 (__m128i a, __m128i count)
+ VPSRLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -1773,7 +1965,10 @@
To be added.
To be added.
- __m256i _mm256_srlv_epi16 (__m256i a, __m256i count) VPSRLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_srlv_epi16 (__m256i a, __m256i count)
+ VPSRLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -1800,7 +1995,10 @@
To be added.
To be added.
- __m256i _mm256_srlv_epi16 (__m256i a, __m256i count) VPSRLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_srlv_epi16 (__m256i a, __m256i count)
+ VPSRLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
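
These entries add the 16-bit granularity that AVX2's variable shifts lacked; each lane shifts by its own count, and counts of 16 or more yield 0. A minimal sketch (assumes -mavx512bw -mavx512vl):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i a = _mm_set1_epi16(1);
        __m128i n = _mm_setr_epi16(0, 1, 4, 15, 16, 3, 2, 8);
        __m128i r = _mm_sllv_epi16(a, n);   // per-lane left shift
        printf("%d %d %d\n", _mm_extract_epi16(r, 1),
               _mm_extract_epi16(r, 3), _mm_extract_epi16(r, 4));
        // prints 2 32768 0 (extract zero-extends the 16-bit lane)
        return 0;
    }
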
@@ -1836,7 +2034,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_dbsad_epu8 (__m128i a, __m128i b, int imm8) VDBPSADBW xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_dbsad_epu8 (__m128i a, __m128i b, int imm8)
+ VDBPSADBW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -1872,7 +2073,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_dbsad_epu8 (__m256i a, __m256i b, int imm8) VDBPSADBW ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_dbsad_epu8 (__m256i a, __m256i b, int imm8)
+ VDBPSADBW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512BW.xml b/xml/System.Runtime.Intrinsics.X86/Avx512BW.xml
index a70ea98c3a4..4fcb5776267 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512BW.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512BW.xml
@@ -44,7 +44,10 @@
To be added.
- __m512i _mm512_abs_epi16 (__m512i a) VPABSW zmm1 {k1}{z}, zmm2/m512
+
+ __m512i _mm512_abs_epi16 (__m512i a)
+ VPABSW zmm1 {k1}{z}, zmm2/m512
+
To be added.
To be added.
@@ -69,7 +72,10 @@
To be added.
- __m512i _mm512_abs_epi8 (__m512i a) VPABSB zmm1 {k1}{z}, zmm2/m512
+
+ __m512i _mm512_abs_epi8 (__m512i a)
+ VPABSB zmm1 {k1}{z}, zmm2/m512
+
To be added.
To be added.
@@ -96,7 +102,10 @@
To be added.
To be added.
- __m512i _mm512_add_epi8 (__m512i a, __m512i b) VPADDB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_add_epi8 (__m512i a, __m512i b)
+ VPADDB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -123,7 +132,10 @@
To be added.
To be added.
- __m512i _mm512_add_epi16 (__m512i a, __m512i b) VPADDW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_add_epi16 (__m512i a, __m512i b)
+ VPADDW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -150,7 +162,10 @@
To be added.
To be added.
- __m512i _mm512_add_epi8 (__m512i a, __m512i b) VPADDB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_add_epi8 (__m512i a, __m512i b)
+ VPADDB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -177,7 +192,10 @@
To be added.
To be added.
- __m512i _mm512_add_epi16 (__m512i a, __m512i b) VPADDW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_add_epi16 (__m512i a, __m512i b)
+ VPADDW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -204,7 +222,10 @@
To be added.
To be added.
- __m512i _mm512_adds_epu8 (__m512i a, __m512i b) VPADDUSB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_adds_epu8 (__m512i a, __m512i b)
+ VPADDUSB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -231,7 +252,10 @@
To be added.
To be added.
- __m512i _mm512_adds_epi16 (__m512i a, __m512i b) VPADDSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_adds_epi16 (__m512i a, __m512i b)
+ VPADDSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -258,7 +282,10 @@
To be added.
To be added.
- __m512i _mm512_adds_epi8 (__m512i a, __m512i b) VPADDSB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_adds_epi8 (__m512i a, __m512i b)
+ VPADDSB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -285,7 +312,10 @@
To be added.
To be added.
- __m512i _mm512_adds_epu16 (__m512i a, __m512i b) VPADDUSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_adds_epu16 (__m512i a, __m512i b)
+ VPADDUSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -321,7 +351,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_alignr_epi8 (__m512i a, __m512i b, const int count) VPALIGNR zmm1 {k1}{z}, zmm2, zmm3/m512, imm8
+
+ __m512i _mm512_alignr_epi8 (__m512i a, __m512i b, const int count)
+ VPALIGNR zmm1 {k1}{z}, zmm2, zmm3/m512, imm8
+
To be added.
To be added.
@@ -357,7 +390,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_alignr_epi8 (__m512i a, __m512i b, const int count) VPALIGNR zmm1 {k1}{z}, zmm2, zmm3/m512, imm8
+
+ __m512i _mm512_alignr_epi8 (__m512i a, __m512i b, const int count)
+ VPALIGNR zmm1 {k1}{z}, zmm2, zmm3/m512, imm8
+
To be added.
To be added.
@@ -384,7 +420,10 @@
To be added.
To be added.
- __m512i _mm512_avg_epu8 (__m512i a, __m512i b) VPAVGB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_avg_epu8 (__m512i a, __m512i b)
+ VPAVGB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -411,7 +450,10 @@
To be added.
To be added.
- __m512i _mm512_avg_epu16 (__m512i a, __m512i b) VPAVGW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_avg_epu16 (__m512i a, __m512i b)
+ VPAVGW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -440,7 +482,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_blendv_epu8 (__m512i a, __m512i b, __m512i mask) VPBLENDMB zmm1 {k1}, zmm2, zmm3/m512
+
+ __m512i _mm512_blendv_epu8 (__m512i a, __m512i b, __m512i mask)
+ VPBLENDMB zmm1 {k1}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -469,7 +514,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_blendv_epi16 (__m512i a, __m512i b, __m512i mask) VPBLENDMW zmm1 {k1}, zmm2, zmm3/m512
+
+ __m512i _mm512_blendv_epi16 (__m512i a, __m512i b, __m512i mask)
+ VPBLENDMW zmm1 {k1}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -498,7 +546,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_blendv_epi8 (__m512i a, __m512i b, __m512i mask) VPBLENDMB zmm1 {k1}, zmm2, zmm3/m512
+
+ __m512i _mm512_blendv_epi8 (__m512i a, __m512i b, __m512i mask)
+ VPBLENDMB zmm1 {k1}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -527,7 +578,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_blendv_epu16 (__m512i a, __m512i b, __m512i mask) VPBLENDMW zmm1 {k1}, zmm2, zmm3/m512
+
+ __m512i _mm512_blendv_epu16 (__m512i a, __m512i b, __m512i mask)
+ VPBLENDMW zmm1 {k1}, zmm2, zmm3/m512
+
To be added.
To be added.
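
The blend entries above map to a mask-driven select; in C the mask arrives as a k-register value rather than a vector. A minimal sketch (assumes -mavx512bw):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m512i a = _mm512_set1_epi8(1);
        __m512i b = _mm512_set1_epi8(2);
        __mmask64 k = 0x5555555555555555ULL;    // pick b in even lanes
        __m512i r = _mm512_mask_blend_epi8(k, a, b);
        unsigned char out[64];
        _mm512_storeu_si512(out, r);
        printf("%u %u\n", out[0], out[1]);      // prints 2 1
        return 0;
    }
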
@@ -552,7 +606,10 @@
To be added.
- __m512i _mm512_broadcastb_epi8 (__m128i a) VPBROADCASTB zmm1 {k1}{z}, xmm2/m8
+
+ __m512i _mm512_broadcastb_epi8 (__m128i a)
+ VPBROADCASTB zmm1 {k1}{z}, xmm2/m8
+
To be added.
To be added.
@@ -577,7 +634,10 @@
To be added.
- __m512i _mm512_broadcastw_epi16 (__m128i a) VPBROADCASTW zmm1 {k1}{z}, xmm2/m16
+
+ __m512i _mm512_broadcastw_epi16 (__m128i a)
+ VPBROADCASTW zmm1 {k1}{z}, xmm2/m16
+
To be added.
To be added.
@@ -602,7 +662,10 @@
To be added.
- __m512i _mm512_broadcastb_epi8 (__m128i a) VPBROADCASTB zmm1 {k1}{z}, xmm2/m8
+
+ __m512i _mm512_broadcastb_epi8 (__m128i a)
+ VPBROADCASTB zmm1 {k1}{z}, xmm2/m8
+
To be added.
To be added.
@@ -627,7 +690,10 @@
To be added.
- __m512i _mm512_broadcastw_epi16 (__m128i a) VPBROADCASTW zmm1 {k1}{z}, xmm2/m16
+
+ __m512i _mm512_broadcastw_epi16 (__m128i a)
+ VPBROADCASTW zmm1 {k1}{z}, xmm2/m16
+
To be added.
To be added.
@@ -654,7 +720,10 @@
To be added.
To be added.
- __m512i _mm512_cmpeq_epu8 (__m512i a, __m512i b) VPCMPEQB k1 {k2}, zmm2, zmm3/m512
+
+ __m512i _mm512_cmpeq_epu8 (__m512i a, __m512i b)
+ VPCMPEQB k1 {k2}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -681,7 +750,10 @@
To be added.
To be added.
- __m512i _mm512_cmpeq_epi16 (__m512i a, __m512i b) VPCMPEQW k1 {k2}, zmm2, zmm3/m512
+
+ __m512i _mm512_cmpeq_epi16 (__m512i a, __m512i b)
+ VPCMPEQW k1 {k2}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -708,7 +780,10 @@
To be added.
To be added.
- __m512i _mm512_cmpeq_epi8 (__m512i a, __m512i b) VPCMPEQB k1 {k2}, zmm2, zmm3/m512
+
+ __m512i _mm512_cmpeq_epi8 (__m512i a, __m512i b)
+ VPCMPEQB k1 {k2}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -735,7 +810,10 @@
To be added.
To be added.
- __m512i _mm512_cmpeq_epu16 (__m512i a, __m512i b) VPCMPEQW k1 {k2}, zmm2, zmm3/m512
+
+ __m512i _mm512_cmpeq_epu16 (__m512i a, __m512i b)
+ VPCMPEQW k1 {k2}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -762,7 +840,10 @@
To be added.
To be added.
- __m512i _mm512_cmpgt_epu8 (__m512i a, __m512i b) VPCMPUB k1 {k2}, zmm2, zmm3/m512, imm8(6)
+
+ __m512i _mm512_cmpgt_epu8 (__m512i a, __m512i b)
+ VPCMPUB k1 {k2}, zmm2, zmm3/m512, imm8(6)
+
To be added.
To be added.
@@ -789,7 +870,10 @@
To be added.
To be added.
- __m512i _mm512_cmpgt_epi16 (__m512i a, __m512i b) VPCMPGTW k1 {k2}, zmm2, zmm3/m512
+
+ __m512i _mm512_cmpgt_epi16 (__m512i a, __m512i b)
+ VPCMPGTW k1 {k2}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -816,7 +900,10 @@
To be added.
To be added.
- __m512i _mm512_cmpgt_epi8 (__m512i a, __m512i b) VPCMPGTB k1 {k2}, zmm2, zmm3/m512
+
+ __m512i _mm512_cmpgt_epi8 (__m512i a, __m512i b)
+ VPCMPGTB k1 {k2}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -843,7 +930,10 @@
To be added.
To be added.
- __m512i _mm512_cmpgt_epu16 (__m512i a, __m512i b) VPCMPUW k1 {k2}, zmm2, zmm3/m512, imm8(6)
+
+ __m512i _mm512_cmpgt_epu16 (__m512i a, __m512i b)
+ VPCMPUW k1 {k2}, zmm2, zmm3/m512, imm8(6)
+
To be added.
To be added.
@@ -870,7 +960,10 @@
To be added.
To be added.
- __m512i _mm512_cmpge_epu8 (__m512i a, __m512i b) VPCMPUB k1 {k2}, zmm2, zmm3/m512, imm8(5)
+
+ __m512i _mm512_cmpge_epu8 (__m512i a, __m512i b)
+ VPCMPUB k1 {k2}, zmm2, zmm3/m512, imm8(5)
+
To be added.
To be added.
@@ -897,7 +990,10 @@
To be added.
To be added.
- __m512i _mm512_cmpge_epi16 (__m512i a, __m512i b) VPCMPW k1 {k2}, zmm2, zmm3/m512, imm8(5)
+
+ __m512i _mm512_cmpge_epi16 (__m512i a, __m512i b)
+ VPCMPW k1 {k2}, zmm2, zmm3/m512, imm8(5)
+
To be added.
To be added.
@@ -924,7 +1020,10 @@
To be added.
To be added.
- __m512i _mm512_cmpge_epi8 (__m512i a, __m512i b) VPCMPB k1 {k2}, zmm2, zmm3/m512, imm8(5)
+
+ __m512i _mm512_cmpge_epi8 (__m512i a, __m512i b)
+ VPCMPB k1 {k2}, zmm2, zmm3/m512, imm8(5)
+
To be added.
To be added.
@@ -951,7 +1050,10 @@
To be added.
To be added.
- __m512i _mm512_cmpge_epu16 (__m512i a, __m512i b) VPCMPUW k1 {k2}, zmm2, zmm3/m512, imm8(5)
+
+ __m512i _mm512_cmpge_epu16 (__m512i a, __m512i b)
+ VPCMPUW k1 {k2}, zmm2, zmm3/m512, imm8(5)
+
To be added.
To be added.
@@ -978,7 +1080,10 @@
To be added.
To be added.
- __m512i _mm512_cmplt_epu8 (__m512i a, __m512i b) VPCMPUB k1 {k2}, zmm2, zmm3/m512, imm8(1)
+
+ __m512i _mm512_cmplt_epu8 (__m512i a, __m512i b)
+ VPCMPUB k1 {k2}, zmm2, zmm3/m512, imm8(1)
+
To be added.
To be added.
@@ -1005,7 +1110,10 @@
To be added.
To be added.
- __m512i _mm512_cmplt_epi16 (__m512i a, __m512i b) VPCMPW k1 {k2}, zmm2, zmm3/m512, imm8(1)
+
+ __m512i _mm512_cmplt_epi16 (__m512i a, __m512i b)
+ VPCMPW k1 {k2}, zmm2, zmm3/m512, imm8(1)
+
To be added.
To be added.
@@ -1032,7 +1140,10 @@
To be added.
To be added.
- __m512i _mm512_cmplt_epi8 (__m512i a, __m512i b) VPCMPB k1 {k2}, zmm2, zmm3/m512, imm8(1)
+
+ __m512i _mm512_cmplt_epi8 (__m512i a, __m512i b)
+ VPCMPB k1 {k2}, zmm2, zmm3/m512, imm8(1)
+
To be added.
To be added.
@@ -1059,7 +1170,10 @@
To be added.
To be added.
- __m512i _mm512_cmplt_epu16 (__m512i a, __m512i b) VPCMPUW k1 {k2}, zmm2, zmm3/m512, imm8(1)
+
+ __m512i _mm512_cmplt_epu16 (__m512i a, __m512i b)
+ VPCMPUW k1 {k2}, zmm2, zmm3/m512, imm8(1)
+
To be added.
To be added.
@@ -1086,7 +1200,10 @@
To be added.
To be added.
- __m512i _mm512_cmple_epu8 (__m512i a, __m512i b) VPCMPUB k1 {k2}, zmm2, zmm3/m512, imm8(2)
+
+ __m512i _mm512_cmple_epu8 (__m512i a, __m512i b)
+ VPCMPUB k1 {k2}, zmm2, zmm3/m512, imm8(2)
+
To be added.
To be added.
@@ -1113,7 +1230,10 @@
To be added.
To be added.
- __m512i _mm512_cmple_epi16 (__m512i a, __m512i b) VPCMPW k1 {k2}, zmm2, zmm3/m512, imm8(2)
+
+ __m512i _mm512_cmple_epi16 (__m512i a, __m512i b)
+ VPCMPW k1 {k2}, zmm2, zmm3/m512, imm8(2)
+
To be added.
To be added.
@@ -1140,7 +1260,10 @@
To be added.
To be added.
- __m512i _mm512_cmple_epi8 (__m512i a, __m512i b) VPCMPB k1 {k2}, zmm2, zmm3/m512, imm8(2)
+
+ __m512i _mm512_cmple_epi8 (__m512i a, __m512i b)
+ VPCMPB k1 {k2}, zmm2, zmm3/m512, imm8(2)
+
To be added.
To be added.
@@ -1167,7 +1290,10 @@
To be added.
To be added.
- __m512i _mm512_cmple_epu16 (__m512i a, __m512i b) VPCMPUW k1 {k2}, zmm2, zmm3/m512, imm8(2)
+
+ __m512i _mm512_cmple_epu16 (__m512i a, __m512i b)
+ VPCMPUW k1 {k2}, zmm2, zmm3/m512, imm8(2)
+
To be added.
To be added.
@@ -1194,7 +1320,10 @@
To be added.
To be added.
- __m512i _mm512_cmpne_epu8 (__m512i a, __m512i b) VPCMPUB k1 {k2}, zmm2, zmm3/m512, imm8(4)
+
+ __m512i _mm512_cmpne_epu8 (__m512i a, __m512i b)
+ VPCMPUB k1 {k2}, zmm2, zmm3/m512, imm8(4)
+
To be added.
To be added.
@@ -1221,7 +1350,10 @@
To be added.
To be added.
- __m512i _mm512_cmpne_epi16 (__m512i a, __m512i b) VPCMPW k1 {k2}, zmm2, zmm3/m512, imm8(4)
+
+ __m512i _mm512_cmpne_epi16 (__m512i a, __m512i b)
+ VPCMPW k1 {k2}, zmm2, zmm3/m512, imm8(4)
+
To be added.
To be added.
@@ -1248,7 +1380,10 @@
To be added.
To be added.
- __m512i _mm512_cmpne_epi8 (__m512i a, __m512i b) VPCMPB k1 {k2}, zmm2, zmm3/m512, imm8(4)
+
+ __m512i _mm512_cmpne_epi8 (__m512i a, __m512i b)
+ VPCMPB k1 {k2}, zmm2, zmm3/m512, imm8(4)
+
To be added.
To be added.
@@ -1275,7 +1410,10 @@
To be added.
To be added.
- __m512i _mm512_cmpne_epu16 (__m512i a, __m512i b) VPCMPUW k1 {k2}, zmm2, zmm3/m512, imm8(4)
+
+ __m512i _mm512_cmpne_epu16 (__m512i a, __m512i b)
+ VPCMPUW k1 {k2}, zmm2, zmm3/m512, imm8(4)
+
To be added.
To be added.
@@ -1300,7 +1438,10 @@
To be added.
- __m256i _mm512_cvtepi16_epi8 (__m512i a) VPMOVWB ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi16_epi8 (__m512i a)
+ VPMOVWB ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1325,7 +1466,10 @@
To be added.
- __m256i _mm512_cvtepi16_epi8 (__m512i a) VPMOVWB ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi16_epi8 (__m512i a)
+ VPMOVWB ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1350,7 +1494,10 @@
To be added.
- __m256i _mm512_cvtusepi16_epi8 (__m512i a) VPMOVUWB ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtusepi16_epi8 (__m512i a)
+ VPMOVUWB ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1375,7 +1522,10 @@
To be added.
- __m256i _mm512_cvtepi16_epi8 (__m512i a) VPMOVWB ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi16_epi8 (__m512i a)
+ VPMOVWB ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1400,7 +1550,10 @@
To be added.
- __m256i _mm512_cvtepi16_epi8 (__m512i a) VPMOVWB ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi16_epi8 (__m512i a)
+ VPMOVWB ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1425,7 +1578,10 @@
To be added.
- __m256i _mm512_cvtsepi16_epi8 (__m512i a) VPMOVSWB ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtsepi16_epi8 (__m512i a)
+ VPMOVSWB ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -1450,7 +1606,10 @@
To be added.
- __m512i _mm512_cvtepu8_epi16 (__m128i a) VPMOVZXBW zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepu8_epi16 (__m128i a)
+ VPMOVZXBW zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -1475,7 +1634,10 @@
To be added.
- __m512i _mm512_cvtepi8_epi16 (__m128i a) VPMOVSXBW zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepi8_epi16 (__m128i a)
+ VPMOVSXBW zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -1500,7 +1662,10 @@
To be added.
- __m512i _mm512_cvtepu8_epi16 (__m128i a) VPMOVZXBW zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepu8_epi16 (__m128i a)
+ VPMOVZXBW zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -1525,7 +1690,10 @@
To be added.
- __m512i _mm512_cvtepi8_epi16 (__m128i a) VPMOVSXBW zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepi8_epi16 (__m128i a)
+ VPMOVSXBW zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -1570,7 +1738,10 @@
To be added.
- __m512i _mm512_loadu_epi8 (__m512i const * mem_addr) VMOVDQU8 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_epi8 (__m512i const * mem_addr)
+ VMOVDQU8 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -1594,7 +1765,10 @@
To be added.
- __m512i _mm512_loadu_epi16 (__m512i const * mem_addr) VMOVDQU16 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_epi16 (__m512i const * mem_addr)
+ VMOVDQU16 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -1618,7 +1792,10 @@
To be added.
- __m512i _mm512_loadu_epi8 (__m512i const * mem_addr) VMOVDQU8 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_epi8 (__m512i const * mem_addr)
+ VMOVDQU8 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -1642,7 +1819,10 @@
To be added.
- __m512i _mm512_loadu_epi16 (__m512i const * mem_addr) VMOVDQU16 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_epi16 (__m512i const * mem_addr)
+ VMOVDQU16 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -1669,7 +1849,10 @@
To be added.
To be added.
- __m512i _mm512_max_epu8 (__m512i a, __m512i b) VPMAXUB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_max_epu8 (__m512i a, __m512i b)
+ VPMAXUB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1696,7 +1879,10 @@
To be added.
To be added.
- __m512i _mm512_max_epi16 (__m512i a, __m512i b) VPMAXSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_max_epi16 (__m512i a, __m512i b)
+ VPMAXSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1723,7 +1909,10 @@
To be added.
To be added.
- __m512i _mm512_max_epi8 (__m512i a, __m512i b) VPMAXSB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_max_epi8 (__m512i a, __m512i b)
+ VPMAXSB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1750,7 +1939,10 @@
To be added.
To be added.
- __m512i _mm512_max_epu16 (__m512i a, __m512i b) VPMAXUW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_max_epu16 (__m512i a, __m512i b)
+ VPMAXUW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1777,7 +1969,10 @@
To be added.
To be added.
- __m512i _mm512_min_epu8 (__m512i a, __m512i b) VPMINUB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_min_epu8 (__m512i a, __m512i b)
+ VPMINUB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1804,7 +1999,10 @@
To be added.
To be added.
- __m512i _mm512_min_epi16 (__m512i a, __m512i b) VPMINSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_min_epi16 (__m512i a, __m512i b)
+ VPMINSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1831,7 +2029,10 @@
To be added.
To be added.
- __m512i _mm512_min_epi8 (__m512i a, __m512i b) VPMINSB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_min_epi8 (__m512i a, __m512i b)
+ VPMINSB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1858,7 +2059,10 @@
To be added.
To be added.
- __m512i _mm512_min_epu16 (__m512i a, __m512i b) VPMINUW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_min_epu16 (__m512i a, __m512i b)
+ VPMINUW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1885,7 +2089,10 @@
To be added.
To be added.
- __m512i _mm512_maddubs_epi16 (__m512i a, __m512i b) VPMADDUBSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_maddubs_epi16 (__m512i a, __m512i b)
+ VPMADDUBSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1912,7 +2119,10 @@
To be added.
To be added.
- __m512i _mm512_madd_epi16 (__m512i a, __m512i b) VPMADDWD zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_madd_epi16 (__m512i a, __m512i b)
+ VPMADDWD zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1939,7 +2149,10 @@
To be added.
To be added.
- __m512i _mm512_mulhi_epi16 (__m512i a, __m512i b) VPMULHW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_mulhi_epi16 (__m512i a, __m512i b)
+ VPMULHW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
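
VPMADDWD multiplies adjacent signed word pairs and sums each pair into a 32-bit lane, the classic dot-product building block. A minimal sketch (assumes -mavx512bw):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m512i a = _mm512_set1_epi16(3);
        __m512i b = _mm512_set1_epi16(7);
        __m512i r = _mm512_madd_epi16(a, b);  // each dword = 3*7 + 3*7
        int out[16];
        _mm512_storeu_si512(out, r);
        printf("%d\n", out[0]);               // prints 42
        return 0;
    }
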
@@ -1966,7 +2179,10 @@
To be added.
To be added.
- __m512i _mm512_mulhi_epu16 (__m512i a, __m512i b) VPMULHUW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_mulhi_epu16 (__m512i a, __m512i b)
+ VPMULHUW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1993,7 +2209,10 @@
To be added.
To be added.
- __m512i _mm512_mulhrs_epi16 (__m512i a, __m512i b) VPMULHRSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_mulhrs_epi16 (__m512i a, __m512i b)
+ VPMULHRSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2020,7 +2239,10 @@
To be added.
To be added.
- __m512i _mm512_mullo_epi16 (__m512i a, __m512i b) VPMULLW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_mullo_epi16 (__m512i a, __m512i b)
+ VPMULLW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2047,7 +2269,10 @@
To be added.
To be added.
- __m512i _mm512_mullo_epi16 (__m512i a, __m512i b) VPMULLW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_mullo_epi16 (__m512i a, __m512i b)
+ VPMULLW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2074,7 +2299,10 @@
To be added.
To be added.
- __m512i _mm512_packs_epi16 (__m512i a, __m512i b) VPACKSSWB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_packs_epi16 (__m512i a, __m512i b)
+ VPACKSSWB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2101,7 +2329,10 @@
To be added.
To be added.
- __m512i _mm512_packs_epi32 (__m512i a, __m512i b) VPACKSSDW zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_packs_epi32 (__m512i a, __m512i b)
+ VPACKSSDW zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -2128,7 +2359,10 @@
To be added.
To be added.
- __m512i _mm512_packus_epi16 (__m512i a, __m512i b) VPACKUSWB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_packus_epi16 (__m512i a, __m512i b)
+ VPACKUSWB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2155,7 +2389,10 @@
To be added.
To be added.
- __m512i _mm512_packus_epi32 (__m512i a, __m512i b) VPACKUSDW zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_packus_epi32 (__m512i a, __m512i b)
+ VPACKUSDW zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
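For reference, the VPACKSS*/VPACKUS* forms above narrow each element with saturation rather than truncation. A scalar sketch of the unsigned-byte case (helper name illustrative; per-128-bit-lane interleaving is omitted):

    // VPACKUSWB-style saturation: clamp a signed 16-bit value
    // to [0, 255] before narrowing to an unsigned byte.
    static byte SaturateToByte(short x)
        => x < 0 ? (byte)0 : x > 255 ? (byte)255 : (byte)x;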
@@ -2182,7 +2419,10 @@
To be added.
To be added.
- __m512i _mm512_permutevar32x16_epi16 (__m512i a, __m512i b) VPERMW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_permutevar32x16_epi16 (__m512i a, __m512i b)
+ VPERMW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2209,7 +2449,10 @@
To be added.
To be added.
- __m512i _mm512_permutevar32x16_epi16 (__m512i a, __m512i b) VPERMW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_permutevar32x16_epi16 (__m512i a, __m512i b)
+ VPERMW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2238,7 +2481,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_permutex2var_epi16 (__m512i a, __m512i idx, __m512i b) VPERMI2W zmm1 {k1}{z}, zmm2, zmm3/m512 VPERMT2W zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_permutex2var_epi16 (__m512i a, __m512i idx, __m512i b)
+ VPERMI2W zmm1 {k1}{z}, zmm2, zmm3/m512
+ VPERMT2W zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2267,7 +2513,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_permutex2var_epi16 (__m512i a, __m512i idx, __m512i b) VPERMI2W zmm1 {k1}{z}, zmm2, zmm3/m512 VPERMT2W zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_permutex2var_epi16 (__m512i a, __m512i idx, __m512i b)
+ VPERMI2W zmm1 {k1}{z}, zmm2, zmm3/m512
+ VPERMT2W zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2301,7 +2550,10 @@
To be added.
To be added.
- __m512i _mm512_slli_epi16 (__m512i a, int imm8) VPSLLW zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_slli_epi16 (__m512i a, int imm8)
+ VPSLLW zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -2328,7 +2580,10 @@
To be added.
To be added.
- __m512i _mm512_sll_epi16 (__m512i a, __m128i count) VPSLLW zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_sll_epi16 (__m512i a, __m128i count)
+ VPSLLW zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -2362,7 +2617,10 @@
To be added.
To be added.
- __m512i _mm512_slli_epi16 (__m512i a, int imm8) VPSLLW zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_slli_epi16 (__m512i a, int imm8)
+ VPSLLW zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -2389,7 +2647,10 @@
To be added.
To be added.
- __m512i _mm512_sll_epi16 (__m512i a, __m128i count) VPSLLW zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_sll_epi16 (__m512i a, __m128i count)
+ VPSLLW zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -2423,7 +2684,10 @@
To be added.
To be added.
- __m512i _mm512_bslli_epi128 (__m512i a, const int imm8) VPSLLDQ zmm1, zmm2/m512, imm8
+
+ __m512i _mm512_bslli_epi128 (__m512i a, const int imm8)
+ VPSLLDQ zmm1, zmm2/m512, imm8
+
To be added.
To be added.
@@ -2457,7 +2721,10 @@
To be added.
To be added.
- __m512i _mm512_bslli_epi128 (__m512i a, const int imm8) VPSLLDQ zmm1, zmm2/m512, imm8
+
+ __m512i _mm512_bslli_epi128 (__m512i a, const int imm8)
+ VPSLLDQ zmm1, zmm2/m512, imm8
+
To be added.
To be added.
@@ -2484,7 +2751,10 @@
To be added.
To be added.
- __m512i _mm512_sllv_epi16 (__m512i a, __m512i count) VPSLLVW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_sllv_epi16 (__m512i a, __m512i count)
+ VPSLLVW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2511,7 +2781,10 @@
To be added.
To be added.
- __m512i _mm512_sllv_epi16 (__m512i a, __m512i count) VPSLLVW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_sllv_epi16 (__m512i a, __m512i count)
+ VPSLLVW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
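For reference, the VPSLLVW form above shifts each 16-bit element by the count held in the corresponding element of the second operand. A scalar sketch (helper name illustrative):

    // Counts of 16 or more clear the element, matching the instruction.
    static ushort[] ShiftLeftLogicalVariable(ushort[] a, ushort[] count)
    {
        var r = new ushort[a.Length];
        for (int i = 0; i < a.Length; i++)
            r[i] = count[i] < 16 ? (ushort)(a[i] << count[i]) : (ushort)0;
        return r;
    }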
@@ -2545,7 +2818,10 @@
To be added.
To be added.
- __m512i _mm512_srai_epi16 (__m512i a, int imm8) VPSRAW zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_srai_epi16 (__m512i a, int imm8)
+ VPSRAW zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -2572,7 +2848,10 @@
To be added.
To be added.
- _mm512_sra_epi16 (__m512i a, __m128i count) VPSRAW zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_sra_epi16 (__m512i a, __m128i count)
+ VPSRAW zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -2599,7 +2878,10 @@
To be added.
To be added.
- __m512i _mm512_srav_epi16 (__m512i a, __m512i count) VPSRAVW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_srav_epi16 (__m512i a, __m512i count)
+ VPSRAVW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2633,7 +2915,10 @@
To be added.
To be added.
- __m512i _mm512_srli_epi16 (__m512i a, int imm8) VPSRLW zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_srli_epi16 (__m512i a, int imm8)
+ VPSRLW zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -2660,7 +2945,10 @@
To be added.
To be added.
- __m512i _mm512_srl_epi16 (__m512i a, __m128i count) VPSRLW zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_srl_epi16 (__m512i a, __m128i count)
+ VPSRLW zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -2694,7 +2982,10 @@
To be added.
To be added.
- __m512i _mm512_srli_epi16 (__m512i a, int imm8) VPSRLW zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_srli_epi16 (__m512i a, int imm8)
+ VPSRLW zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -2721,7 +3012,10 @@
To be added.
To be added.
- __m512i _mm512_srl_epi16 (__m512i a, __m128i count) VPSRLW zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_srl_epi16 (__m512i a, __m128i count)
+ VPSRLW zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -2755,7 +3049,10 @@
To be added.
To be added.
- __m512i _mm512_bsrli_epi128 (__m512i a, const int imm8) VPSRLDQ zmm1, zmm2/m128, imm8
+
+ __m512i _mm512_bsrli_epi128 (__m512i a, const int imm8)
+ VPSRLDQ zmm1, zmm2/m512, imm8
+
To be added.
To be added.
@@ -2789,7 +3086,10 @@
To be added.
To be added.
- __m512i _mm512_bsrli_epi128 (__m512i a, const int imm8) VPSRLDQ zmm1, zmm2/m128, imm8
+
+ __m512i _mm512_bsrli_epi128 (__m512i a, const int imm8)
+ VPSRLDQ zmm1, zmm2/m512, imm8
+
To be added.
To be added.
@@ -2816,7 +3116,10 @@
To be added.
To be added.
- __m512i _mm512_srlv_epi16 (__m512i a, __m512i count) VPSRLVW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_srlv_epi16 (__m512i a, __m512i count)
+ VPSRLVW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2843,7 +3146,10 @@
To be added.
To be added.
- __m512i _mm512_srlv_epi16 (__m512i a, __m512i count) VPSRLVW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_srlv_epi16 (__m512i a, __m512i count)
+ VPSRLVW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2870,7 +3176,10 @@
To be added.
To be added.
- __m512i _mm512_shuffle_epi8 (__m512i a, __m512i b) VPSHUFB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_shuffle_epi8 (__m512i a, __m512i b)
+ VPSHUFB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2897,7 +3206,10 @@
To be added.
To be added.
- __m512i _mm512_shuffle_epi8 (__m512i a, __m512i b) VPSHUFB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_shuffle_epi8 (__m512i a, __m512i b)
+ VPSHUFB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -2931,7 +3243,10 @@
To be added.
To be added.
- __m512i _mm512_shufflehi_epi16 (__m512i a, const int imm8) VPSHUFHW zmm1 {k1}{z}, zmm2/m512, imm8
+
+ __m512i _mm512_shufflehi_epi16 (__m512i a, const int imm8)
+ VPSHUFHW zmm1 {k1}{z}, zmm2/m512, imm8
+
To be added.
To be added.
@@ -2965,7 +3280,10 @@
To be added.
To be added.
- __m512i _mm512_shufflehi_epi16 (__m512i a, const int imm8) VPSHUFHW zmm1 {k1}{z}, zmm2/m512, imm8
+
+ __m512i _mm512_shufflehi_epi16 (__m512i a, const int imm8)
+ VPSHUFHW zmm1 {k1}{z}, zmm2/m512, imm8
+
To be added.
To be added.
@@ -2999,7 +3317,10 @@
To be added.
To be added.
- __m512i _mm512_shufflelo_epi16 (__m512i a, const int imm8) VPSHUFLW zmm1 {k1}{z}, zmm2/m512, imm8
+
+ __m512i _mm512_shufflelo_epi16 (__m512i a, const int imm8)
+ VPSHUFLW zmm1 {k1}{z}, zmm2/m512, imm8
+
To be added.
To be added.
@@ -3033,7 +3354,10 @@
To be added.
To be added.
- __m512i _mm512_shufflelo_epi16 (__m512i a, const int imm8) VPSHUFLW zmm1 {k1}{z}, zmm2/m512, imm8
+
+ __m512i _mm512_shufflelo_epi16 (__m512i a, const int imm8)
+ VPSHUFLW zmm1 {k1}{z}, zmm2/m512, imm8
+
To be added.
To be added.
@@ -3059,7 +3383,10 @@
To be added.
To be added.
- void _mm512_storeu_epi8 (__m512i * mem_addr, __m512i a) VMOVDQU8 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_epi8 (__m512i * mem_addr, __m512i a)
+ VMOVDQU8 m512 {k1}{z}, zmm1
+
To be added.
@@ -3084,7 +3411,10 @@
To be added.
To be added.
- void _mm512_storeu_epi16 (__m512i * mem_addr, __m512i a) VMOVDQU16 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_epi16 (__m512i * mem_addr, __m512i a)
+ VMOVDQU16 m512 {k1}{z}, zmm1
+
To be added.
@@ -3109,7 +3439,10 @@
To be added.
To be added.
- void _mm512_storeu_epi8 (__m512i * mem_addr, __m512i a) VMOVDQU8 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_epi8 (__m512i * mem_addr, __m512i a)
+ VMOVDQU8 m512 {k1}{z}, zmm1
+
To be added.
@@ -3134,7 +3467,10 @@
To be added.
To be added.
- void _mm512_storeu_epi16 (__m512i * mem_addr, __m512i a) VMOVDQU16 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_epi16 (__m512i * mem_addr, __m512i a)
+ VMOVDQU16 m512 {k1}{z}, zmm1
+
To be added.
@@ -3160,7 +3496,10 @@
To be added.
To be added.
- __m512i _mm512_sub_epi8 (__m512i a, __m512i b) VPSUBB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_sub_epi8 (__m512i a, __m512i b)
+ VPSUBB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3187,7 +3526,10 @@
To be added.
To be added.
- __m512i _mm512_sub_epi16 (__m512i a, __m512i b) VPSUBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_sub_epi16 (__m512i a, __m512i b)
+ VPSUBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3214,7 +3556,10 @@
To be added.
To be added.
- __m512i _mm512_sub_epi8 (__m512i a, __m512i b) VPSUBB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_sub_epi8 (__m512i a, __m512i b)
+ VPSUBB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3241,7 +3586,10 @@
To be added.
To be added.
- __m512i _mm512_sub_epi16 (__m512i a, __m512i b) VPSUBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_sub_epi16 (__m512i a, __m512i b)
+ VPSUBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3268,7 +3616,10 @@
To be added.
To be added.
- __m512i _mm512_subs_epu8 (__m512i a, __m512i b) VPSUBUSB zmm1 {k1}{z}, zmm2, zmm3/m128
+
+ __m512i _mm512_subs_epu8 (__m512i a, __m512i b)
+ VPSUBUSB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3295,7 +3646,10 @@
To be added.
To be added.
- __m512i _mm512_subs_epi16 (__m512i a, __m512i b) VPSUBSW zmm1 {k1}{z}, zmm2, zmm3/m128
+
+ __m512i _mm512_subs_epi16 (__m512i a, __m512i b)
+ VPSUBSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3322,7 +3676,10 @@
To be added.
To be added.
- __m512i _mm512_subs_epi8 (__m512i a, __m512i b) VPSUBSB zmm1 {k1}{z}, zmm2, zmm3/m128
+
+ __m512i _mm512_subs_epi8 (__m512i a, __m512i b)
+ VPSUBSB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3349,7 +3706,10 @@
To be added.
To be added.
- __m512i _mm512_subs_epu16 (__m512i a, __m512i b) VPSUBUSW zmm1 {k1}{z}, zmm2, zmm3/m128
+
+ __m512i _mm512_subs_epu16 (__m512i a, __m512i b)
+ VPSUBUSW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3376,7 +3736,10 @@
To be added.
To be added.
- __m512i _mm512_sad_epu8 (__m512i a, __m512i b) VPSADBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_sad_epu8 (__m512i a, __m512i b)
+ VPSADBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
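For reference, VPSADBW sums absolute byte differences within each group of eight bytes, leaving one small sum per 64-bit lane. A scalar sketch (helper name illustrative):

    // Each 64-bit lane receives the sum of |a[i] - b[i]| over its
    // eight byte positions; the sum fits in the low 16 bits.
    static ulong[] SumAbsoluteDifferences(byte[] a, byte[] b)
    {
        var r = new ulong[a.Length / 8];
        for (int i = 0; i < a.Length; i++)
            r[i / 8] += (ulong)Math.Abs(a[i] - b[i]);
        return r;
    }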
@@ -3412,7 +3775,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_dbsad_epu8 (__m512i a, __m512i b, int imm8) VDBPSADBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_dbsad_epu8 (__m512i a, __m512i b, int imm8)
+ VDBPSADBW zmm1 {k1}{z}, zmm2, zmm3/m512, imm8
+
To be added.
To be added.
@@ -3439,7 +3805,10 @@
To be added.
To be added.
- __m512i _mm512_unpackhi_epi8 (__m512i a, __m512i b) VPUNPCKHBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_unpackhi_epi8 (__m512i a, __m512i b)
+ VPUNPCKHBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3466,7 +3835,10 @@
To be added.
To be added.
- __m512i _mm512_unpackhi_epi16 (__m512i a, __m512i b) VPUNPCKHWD zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_unpackhi_epi16 (__m512i a, __m512i b)
+ VPUNPCKHWD zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3493,7 +3865,10 @@
To be added.
To be added.
- __m512i _mm512_unpackhi_epi8 (__m512i a, __m512i b) VPUNPCKHBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_unpackhi_epi8 (__m512i a, __m512i b)
+ VPUNPCKHBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3520,7 +3895,10 @@
To be added.
To be added.
- __m512i _mm512_unpackhi_epi16 (__m512i a, __m512i b) VPUNPCKHWD zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_unpackhi_epi16 (__m512i a, __m512i b)
+ VPUNPCKHWD zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3547,7 +3925,10 @@
To be added.
To be added.
- __m512i _mm512_unpacklo_epi8 (__m512i a, __m512i b) VPUNPCKLBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_unpacklo_epi8 (__m512i a, __m512i b)
+ VPUNPCKLBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3574,7 +3955,10 @@
To be added.
To be added.
- __m512i _mm512_unpacklo_epi16 (__m512i a, __m512i b) VPUNPCKLWD zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_unpacklo_epi16 (__m512i a, __m512i b)
+ VPUNPCKLWD zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3601,7 +3985,10 @@
To be added.
To be added.
- __m512i _mm512_unpacklo_epi8 (__m512i a, __m512i b) VPUNPCKLBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_unpacklo_epi8 (__m512i a, __m512i b)
+ VPUNPCKLBW zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -3628,7 +4015,10 @@
To be added.
To be added.
- __m512i _mm512_unpacklo_epi16 (__m512i a, __m512i b) VPUNPCKLWD zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_unpacklo_epi16 (__m512i a, __m512i b)
+ VPUNPCKLWD zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
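A hedged usage sketch for the Avx512BW max/min surface documented above, assuming the .NET 8 System.Runtime.Intrinsics.X86 API shape:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    // Clamp each byte of v into [lo, hi] via VPMAXUB then VPMINUB.
    static Vector512<byte> Clamp(Vector512<byte> v, Vector512<byte> lo, Vector512<byte> hi)
        => Avx512BW.IsSupported
            ? Avx512BW.Min(Avx512BW.Max(v, lo), hi)
            : throw new PlatformNotSupportedException();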
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512CD+VL.xml b/xml/System.Runtime.Intrinsics.X86/Avx512CD+VL.xml
index d01c7ff3ca9..055121f9a1b 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512CD+VL.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512CD+VL.xml
@@ -38,7 +38,10 @@
To be added.
- __m128i _mm_conflict_epi32 (__m128i a) VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128i _mm_conflict_epi32 (__m128i a)
+ VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -63,7 +66,10 @@
To be added.
- __m128i _mm_conflict_epi64 (__m128i a) VPCONFLICTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_conflict_epi64 (__m128i a)
+ VPCONFLICTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -88,7 +94,10 @@
To be added.
- __m128i _mm_conflict_epi32 (__m128i a) VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128i _mm_conflict_epi32 (__m128i a)
+ VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -113,7 +122,10 @@
To be added.
- __m128i _mm_conflict_epi64 (__m128i a) VPCONFLICTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_conflict_epi64 (__m128i a)
+ VPCONFLICTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -138,7 +150,10 @@
To be added.
- __m256i _mm256_conflict_epi32 (__m256i a) VPCONFLICTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256i _mm256_conflict_epi32 (__m256i a)
+ VPCONFLICTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -163,7 +178,10 @@
To be added.
- __m256i _mm256_conflict_epi64 (__m256i a) VPCONFLICTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256i _mm256_conflict_epi64 (__m256i a)
+ VPCONFLICTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -188,7 +206,10 @@
To be added.
- __m256i _mm256_conflict_epi32 (__m256i a) VPCONFLICTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256i _mm256_conflict_epi32 (__m256i a)
+ VPCONFLICTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -213,7 +234,10 @@
To be added.
- __m256i _mm256_conflict_epi64 (__m256i a) VPCONFLICTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256i _mm256_conflict_epi64 (__m256i a)
+ VPCONFLICTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -259,7 +283,10 @@
To be added.
- __m128i _mm_lzcnt_epi32 (__m128i a) VPLZCNTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128i _mm_lzcnt_epi32 (__m128i a)
+ VPLZCNTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -284,7 +311,10 @@
To be added.
- __m128i _mm_lzcnt_epi64 (__m128i a) VPLZCNTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_lzcnt_epi64 (__m128i a)
+ VPLZCNTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -309,7 +339,10 @@
To be added.
- __m128i _mm_lzcnt_epi32 (__m128i a) VPLZCNTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128i _mm_lzcnt_epi32 (__m128i a)
+ VPLZCNTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -334,7 +367,10 @@
To be added.
- __m128i _mm_lzcnt_epi64 (__m128i a) VPLZCNTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_lzcnt_epi64 (__m128i a)
+ VPLZCNTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -359,7 +395,10 @@
To be added.
- __m256i _mm256_lzcnt_epi32 (__m256i a) VPLZCNTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256i _mm256_lzcnt_epi32 (__m256i a)
+ VPLZCNTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -384,7 +423,10 @@
To be added.
- __m256i _mm256_lzcnt_epi64 (__m256i a) VPLZCNTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256i _mm256_lzcnt_epi64 (__m256i a)
+ VPLZCNTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -409,7 +451,10 @@
To be added.
- __m256i _mm256_lzcnt_epi32 (__m256i a) VPLZCNTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256i _mm256_lzcnt_epi32 (__m256i a)
+ VPLZCNTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -434,7 +479,10 @@
To be added.
- __m256i _mm256_lzcnt_epi64 (__m256i a) VPLZCNTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256i _mm256_lzcnt_epi64 (__m256i a)
+ VPLZCNTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
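A hedged usage sketch for the VL leading-zero-count surface above, assuming the .NET 8 Avx512CD.VL API shape:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    // VPLZCNTD: per-element leading zero count; a zero element yields 32.
    static Vector128<uint> LeadingZeros(Vector128<uint> v)
        => Avx512CD.VL.IsSupported
            ? Avx512CD.VL.LeadingZeroCount(v)
            : throw new PlatformNotSupportedException();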
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512CD.xml b/xml/System.Runtime.Intrinsics.X86/Avx512CD.xml
index f3ed1121d99..db553423f0d 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512CD.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512CD.xml
@@ -44,7 +44,10 @@
To be added.
- __m512i _mm512_conflict_epi32 (__m512i a) VPCONFLICTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
+ __m512i _mm512_conflict_epi32 (__m512i a)
+ VPCONFLICTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -69,7 +72,10 @@
To be added.
- __m512i _mm512_conflict_epi64 (__m512i a) VPCONFLICTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m512i _mm512_conflict_epi64 (__m512i a)
+ VPCONFLICTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -94,7 +100,10 @@
To be added.
- __m512i _mm512_conflict_epi32 (__m512i a) VPCONFLICTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
+ __m512i _mm512_conflict_epi32 (__m512i a)
+ VPCONFLICTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -119,7 +128,10 @@
To be added.
- __m512i _mm512_conflict_epi64 (__m512i a) VPCONFLICTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m512i _mm512_conflict_epi64 (__m512i a)
+ VPCONFLICTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -165,7 +177,10 @@
To be added.
- __m512i _mm512_lzcnt_epi32 (__m512i a) VPLZCNTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
+ __m512i _mm512_lzcnt_epi32 (__m512i a)
+ VPLZCNTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -190,7 +205,10 @@
To be added.
- __m512i _mm512_lzcnt_epi64 (__m512i a) VPLZCNTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m512i _mm512_lzcnt_epi64 (__m512i a)
+ VPLZCNTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -215,7 +233,10 @@
To be added.
- __m512i _mm512_lzcnt_epi32 (__m512i a) VPLZCNTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
+ __m512i _mm512_lzcnt_epi32 (__m512i a)
+ VPLZCNTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -240,7 +261,10 @@
To be added.
- __m512i _mm512_lzcnt_epi64 (__m512i a) VPLZCNTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m512i _mm512_lzcnt_epi64 (__m512i a)
+ VPLZCNTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
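For reference, VPCONFLICTD/VPCONFLICTQ report, per element, a bitmask of earlier elements holding the same value. A scalar sketch (helper name illustrative):

    // Bit j of r[i] is set when a[j] == a[i] for j < i.
    static uint[] DetectConflicts(uint[] a)
    {
        var r = new uint[a.Length];
        for (int i = 0; i < a.Length; i++)
            for (int j = 0; j < i; j++)
                if (a[j] == a[i]) r[i] |= 1u << j;
        return r;
    }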
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512DQ+VL.xml b/xml/System.Runtime.Intrinsics.X86/Avx512DQ+VL.xml
index 190c4a24929..f7e9fb6dc8e 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512DQ+VL.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512DQ+VL.xml
@@ -38,7 +38,10 @@
To be added.
- __m128i _mm_broadcast_i32x2 (__m128i a) VBROADCASTI32x2 xmm1 {k1}{z}, xmm2/m64
+
+ __m128i _mm_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 xmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -63,7 +66,10 @@
To be added.
- __m128i _mm_broadcast_i32x2 (__m128i a) VBROADCASTI32x2 xmm1 {k1}{z}, xmm2/m64
+
+ __m128i _mm_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 xmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -88,7 +94,10 @@
To be added.
- __m256i _mm256_broadcast_i32x2 (__m128i a) VBROADCASTI32x2 ymm1 {k1}{z}, xmm2/m64
+
+ __m256i _mm256_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 ymm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -113,7 +122,10 @@
To be added.
- __m256 _mm256_broadcast_f32x2 (__m128 a) VBROADCASTF32x2 ymm1 {k1}{z}, xmm2/m64
+
+ __m256 _mm256_broadcast_f32x2 (__m128 a)
+ VBROADCASTF32x2 ymm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -138,7 +150,10 @@
To be added.
- __m256i _mm256_broadcast_i32x2 (__m128i a) VBROADCASTI32x2 ymm1 {k1}{z}, xmm2/m64
+
+ __m256i _mm256_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 ymm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -163,7 +178,10 @@
To be added.
- __m128d _mm_cvtepi64_pd (__m128i a) VCVTQQ2PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128d _mm_cvtepi64_pd (__m128i a)
+ VCVTQQ2PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -188,7 +206,10 @@
To be added.
- __m128d _mm_cvtepu64_pd (__m128i a) VCVTUQQ2PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128d _mm_cvtepu64_pd (__m128i a)
+ VCVTUQQ2PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -213,7 +234,10 @@
To be added.
- __m128i _mm_cvtpd_epi64 (__m128d a) VCVTPD2QQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_cvtpd_epi64 (__m128d a)
+ VCVTPD2QQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -238,7 +262,10 @@
To be added.
- __m128i _mm_cvtps_epi64 (__m128 a) VCVTPS2QQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
+ __m128i _mm_cvtps_epi64 (__m128 a)
+ VCVTPS2QQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
To be added.
To be added.
@@ -263,7 +290,10 @@
To be added.
- __m128i _mm_cvttpd_epi64 (__m128d a) VCVTTPD2QQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_cvttpd_epi64 (__m128d a)
+ VCVTTPD2QQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -288,7 +318,10 @@
To be added.
- __m128i _mm_cvttps_epi64 (__m128 a) VCVTTPS2QQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
+ __m128i _mm_cvttps_epi64 (__m128 a)
+ VCVTTPS2QQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
To be added.
To be added.
@@ -313,7 +346,10 @@
To be added.
- __m128 _mm_cvtepi64_ps (__m128i a) VCVTQQ2PS xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128 _mm_cvtepi64_ps (__m128i a)
+ VCVTQQ2PS xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -338,7 +374,10 @@
To be added.
- __m128 _mm_cvtepu64_ps (__m128i a) VCVTUQQ2PS xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128 _mm_cvtepu64_ps (__m128i a)
+ VCVTUQQ2PS xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -363,7 +402,10 @@
To be added.
- __m128 _mm256_cvtepi64_ps (__m256i a) VCVTQQ2PS xmm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m128 _mm256_cvtepi64_ps (__m256i a)
+ VCVTQQ2PS xmm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -388,7 +430,10 @@
To be added.
- __m128 _mm256_cvtepu64_ps (__m256i a) VCVTUQQ2PS xmm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m128 _mm256_cvtepu64_ps (__m256i a)
+ VCVTUQQ2PS xmm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -413,7 +458,10 @@
To be added.
- __m128i _mm_cvtpd_epu64 (__m128d a) VCVTPD2UQQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_cvtpd_epu64 (__m128d a)
+ VCVTPD2UQQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -438,7 +486,10 @@
To be added.
- __m128i _mm_cvtps_epu64 (__m128 a) VCVTPS2UQQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
+ __m128i _mm_cvtps_epu64 (__m128 a)
+ VCVTPS2UQQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
To be added.
To be added.
@@ -463,7 +514,10 @@
To be added.
- __m128i _mm_cvttpd_epu64 (__m128d a) VCVTTPD2UQQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_cvttpd_epu64 (__m128d a)
+ VCVTTPD2UQQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -488,7 +542,10 @@
To be added.
- __m128i _mm_cvttps_epu64 (__m128 a) VCVTTPS2UQQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
+ __m128i _mm_cvttps_epu64 (__m128 a)
+ VCVTTPS2UQQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
To be added.
To be added.
@@ -513,7 +570,10 @@
To be added.
- __m256d _mm256_cvtepi64_pd (__m256i a) VCVTQQ2PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256d _mm256_cvtepi64_pd (__m256i a)
+ VCVTQQ2PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -538,7 +598,10 @@
To be added.
- __m256d _mm256_cvtepu64_pd (__m256i a) VCVTUQQ2PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256d _mm256_cvtepu64_pd (__m256i a)
+ VCVTUQQ2PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -563,7 +626,10 @@
To be added.
- __m256i _mm256_cvtps_epi64 (__m128 a) VCVTPS2QQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m256i _mm256_cvtps_epi64 (__m128 a)
+ VCVTPS2QQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -588,7 +654,10 @@
To be added.
- __m256i _mm256_cvtpd_epi64 (__m256d a) VCVTPD2QQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256i _mm256_cvtpd_epi64 (__m256d a)
+ VCVTPD2QQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -613,7 +682,10 @@
To be added.
- __m256i _mm256_cvttps_epi64 (__m128 a) VCVTTPS2QQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m256i _mm256_cvttps_epi64 (__m128 a)
+ VCVTTPS2QQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -638,7 +710,10 @@
To be added.
- __m256i _mm256_cvttpd_epi64 (__m256d a) VCVTTPD2QQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256i _mm256_cvttpd_epi64 (__m256d a)
+ VCVTTPD2QQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -663,7 +738,10 @@
To be added.
- __m256i _mm256_cvtps_epu64 (__m128 a) VCVTPS2UQQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m256i _mm256_cvtps_epu64 (__m128 a)
+ VCVTPS2UQQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -688,7 +766,10 @@
To be added.
- __m256i _mm256_cvtpd_epu64 (__m256d a) VCVTPD2UQQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256i _mm256_cvtpd_epu64 (__m256d a)
+ VCVTPD2UQQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -713,7 +794,10 @@
To be added.
- __m256i _mm256_cvttps_epu64 (__m128 a) VCVTTPS2UQQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m256i _mm256_cvttps_epu64 (__m128 a)
+ VCVTTPS2UQQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -738,7 +822,10 @@
To be added.
- __m256i _mm256_cvttpd_epu64 (__m256d a) VCVTTPD2UQQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256i _mm256_cvttpd_epu64 (__m256d a)
+ VCVTTPD2UQQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -786,7 +873,10 @@
To be added.
To be added.
- __m128i _mm_mullo_epi64 (__m128i a, __m128i b) VPMULLQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_mullo_epi64 (__m128i a, __m128i b)
+ VPMULLQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -813,7 +903,10 @@
To be added.
To be added.
- __m128i _mm_mullo_epi64 (__m128i a, __m128i b) VPMULLQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_mullo_epi64 (__m128i a, __m128i b)
+ VPMULLQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -840,7 +933,10 @@
To be added.
To be added.
- __m256i _mm256_mullo_epi64 (__m256i a, __m256i b) VPMULLQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_mullo_epi64 (__m256i a, __m256i b)
+ VPMULLQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -867,7 +963,10 @@
To be added.
To be added.
- __m256i _mm256_mullo_epi64 (__m256i a, __m256i b) VPMULLQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_mullo_epi64 (__m256i a, __m256i b)
+ VPMULLQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -903,7 +1002,9 @@
To be added.
To be added.
To be added.
- __m128d _mm_range_pd(__m128d a, __m128d b, int imm); VRANGEPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
+ __m128d _mm_range_pd(__m128d a, __m128d b, int imm);
+ VRANGEPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -939,7 +1040,9 @@
To be added.
To be added.
To be added.
- __m128 _mm_range_ps(__m128 a, __m128 b, int imm); VRANGEPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
+ __m128 _mm_range_ps(__m128 a, __m128 b, int imm);
+ VRANGEPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -975,7 +1078,9 @@
To be added.
To be added.
To be added.
- __m256d _mm256_range_pd(__m256d a, __m256d b, int imm); VRANGEPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
+ __m256d _mm256_range_pd(__m256d a, __m256d b, int imm);
+ VRANGEPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -1011,7 +1116,9 @@
To be added.
To be added.
To be added.
- __m256 _mm256_range_ps(__m256 a, __m256 b, int imm); VRANGEPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
+ __m256 _mm256_range_ps(__m256 a, __m256 b, int imm);
+ VRANGEPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -1045,7 +1152,9 @@
To be added.
To be added.
- __m128d _mm_reduce_pd(__m128d a, int imm); VREDUCEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
+ __m128d _mm_reduce_pd(__m128d a, int imm);
+ VREDUCEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -1079,7 +1188,9 @@
To be added.
To be added.
- __m128 _mm_reduce_ps(__m128 a, int imm); VREDUCEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
+ __m128 _mm_reduce_ps(__m128 a, int imm);
+ VREDUCEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -1113,7 +1224,9 @@
To be added.
To be added.
- __m256d _mm256_reduce_pd(__m256d a, int imm); VREDUCEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
+ __m256d _mm256_reduce_pd(__m256d a, int imm);
+ VREDUCEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -1147,7 +1260,9 @@
To be added.
To be added.
- __m256 _mm256_reduce_ps(__m256 a, int imm); VREDUCEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
+ __m256 _mm256_reduce_ps(__m256 a, int imm);
+ VREDUCEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
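A hedged usage sketch for the VL 64-bit integer conversions above, assuming the .NET 8 Avx512DQ.VL API shape (the method name is an assumption if the surface differs):

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    // VCVTQQ2PD: convert each signed 64-bit element to double precision.
    static Vector256<double> ToDouble(Vector256<long> v)
        => Avx512DQ.VL.IsSupported
            ? Avx512DQ.VL.ConvertToVector256Double(v)
            : throw new PlatformNotSupportedException();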
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512DQ.xml b/xml/System.Runtime.Intrinsics.X86/Avx512DQ.xml
index 3cc0c3fdfd3..d04bf554b1a 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512DQ.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512DQ.xml
@@ -46,7 +46,10 @@
To be added.
To be added.
- __m512d _mm512_and_pd (__m512d a, __m512d b) VANDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_and_pd (__m512d a, __m512d b)
+ VANDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -73,7 +76,10 @@
To be added.
To be added.
- __m512 _mm512_and_ps (__m512 a, __m512 b) VANDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_and_ps (__m512 a, __m512 b)
+ VANDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -100,7 +106,10 @@
To be added.
To be added.
- __m512d _mm512_andnot_pd (__m512d a, __m512d b) VANDNPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_andnot_pd (__m512d a, __m512d b)
+ VANDNPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -127,7 +136,10 @@
To be added.
To be added.
- __m512 _mm512_andnot_ps (__m512 a, __m512 b) VANDNPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_andnot_ps (__m512 a, __m512 b)
+ VANDNPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -152,7 +164,10 @@
To be added.
- __m512i _mm512_broadcast_i32x2 (__m128i a) VBROADCASTI32x2 zmm1 {k1}{z}, xmm2/m64
+
+ __m512i _mm512_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -177,7 +192,10 @@
To be added.
- __m512 _mm512_broadcast_f32x2 (__m128 a) VBROADCASTF32x2 zmm1 {k1}{z}, xmm2/m64
+
+ __m512 _mm512_broadcast_f32x2 (__m128 a)
+ VBROADCASTF32x2 zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -202,7 +220,10 @@
To be added.
- __m512i _mm512_broadcast_i32x2 (__m128i a) VBROADCASTI32x2 zmm1 {k1}{z}, xmm2/m64
+
+ __m512i _mm512_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -226,7 +247,10 @@
To be added.
- __m512d _mm512_broadcast_f64x2 (__m128d const * mem_addr) VBROADCASTF64x2 zmm1 {k1}{z}, m128
+
+ __m512d _mm512_broadcast_f64x2 (__m128d const * mem_addr)
+ VBROADCASTF64x2 zmm1 {k1}{z}, m128
+
To be added.
To be added.
@@ -250,7 +274,10 @@
To be added.
- __m512i _mm512_broadcast_i64x2 (__m128i const * mem_addr) VBROADCASTI64x2 zmm1 {k1}{z}, m128
+
+ __m512i _mm512_broadcast_i64x2 (__m128i const * mem_addr)
+ VBROADCASTI64x2 zmm1 {k1}{z}, m128
+
To be added.
To be added.
@@ -274,7 +301,10 @@
To be added.
- __m512i _mm512_broadcast_i64x2 (__m128i const * mem_addr) VBROADCASTI64x2 zmm1 {k1}{z}, m128
+
+ __m512i _mm512_broadcast_i64x2 (__m128i const * mem_addr)
+ VBROADCASTI64x2 zmm1 {k1}{z}, m128
+
To be added.
To be added.
@@ -298,7 +328,10 @@
To be added.
- __m512i _mm512_broadcast_i32x8 (__m256i const * mem_addr) VBROADCASTI32x8 zmm1 {k1}{z}, m256
+
+ __m512i _mm512_broadcast_i32x8 (__m256i const * mem_addr)
+ VBROADCASTI32x8 zmm1 {k1}{z}, m256
+
To be added.
To be added.
@@ -322,7 +355,10 @@
To be added.
- __m512 _mm512_broadcast_f32x8 (__m256 const * mem_addr) VBROADCASTF32x8 zmm1 {k1}{z}, m256
+
+ __m512 _mm512_broadcast_f32x8 (__m256 const * mem_addr)
+ VBROADCASTF32x8 zmm1 {k1}{z}, m256
+
To be added.
To be added.
@@ -346,7 +382,10 @@
To be added.
- __m512i _mm512_broadcast_i32x8 (__m256i const * mem_addr) VBROADCASTI32x8 zmm1 {k1}{z}, m256
+
+ __m512i _mm512_broadcast_i32x8 (__m256i const * mem_addr)
+ VBROADCASTI32x8 zmm1 {k1}{z}, m256
+
To be added.
To be added.
@@ -371,7 +410,10 @@
To be added.
- __m512 _mm512_cvtepi64_ps (__m512i a) VCVTQQ2PS ymm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m256 _mm512_cvtepi64_ps (__m512i a)
+ VCVTQQ2PS ymm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -396,7 +438,10 @@
To be added.
- __m512 _mm512_cvtepu64_ps (__m512i a) VCVTUQQ2PS ymm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m256 _mm512_cvtepu64_ps (__m512i a)
+ VCVTUQQ2PS ymm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -421,7 +466,10 @@
To be added.
- __m512d _mm512_cvtepi64_pd (__m512i a) VCVTQQ2PD zmm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m512d _mm512_cvtepi64_pd (__m512i a)
+ VCVTQQ2PD zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -446,7 +494,10 @@
To be added.
- __m512d _mm512_cvtepu64_pd (__m512i a) VCVTUQQ2PD zmm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m512d _mm512_cvtepu64_pd (__m512i a)
+ VCVTUQQ2PD zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -471,7 +522,10 @@
To be added.
- __m512i _mm512_cvtps_epi64 (__m512 a) VCVTPS2QQ zmm1 {k1}{z}, ymm2/m256/m32bcst{er}
+
+ __m512i _mm512_cvtps_epi64 (__m256 a)
+ VCVTPS2QQ zmm1 {k1}{z}, ymm2/m256/m32bcst{er}
+
To be added.
To be added.
@@ -496,7 +550,10 @@
To be added.
- __m512i _mm512_cvtpd_epi64 (__m512d a) VCVTPD2QQ zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
+ __m512i _mm512_cvtpd_epi64 (__m512d a)
+ VCVTPD2QQ zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -521,7 +578,10 @@
To be added.
- __m512i _mm512_cvttps_epi64 (__m512 a) VCVTTPS2QQ zmm1 {k1}{z}, ymm2/m256/m32bcst{er}
+
+ __m512i _mm512_cvttps_epi64 (__m256 a)
+ VCVTTPS2QQ zmm1 {k1}{z}, ymm2/m256/m32bcst{sae}
+
To be added.
To be added.
@@ -546,7 +606,10 @@
To be added.
- __m512i _mm512_cvttpd_epi64 (__m512 a) VCVTTPD2QQ zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
+ __m512i _mm512_cvttpd_epi64 (__m512d a)
+ VCVTTPD2QQ zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
To be added.
To be added.
@@ -571,7 +634,10 @@
To be added.
- __m512i _mm512_cvtps_epu64 (__m512 a) VCVTPS2UQQ zmm1 {k1}{z}, ymm2/m256/m32bcst{er}
+
+ __m512i _mm512_cvtps_epu64 (__m256 a)
+ VCVTPS2UQQ zmm1 {k1}{z}, ymm2/m256/m32bcst{er}
+
To be added.
To be added.
@@ -596,7 +662,10 @@
To be added.
- __m512i _mm512_cvtpd_epu64 (__m512d a) VCVTPD2UQQ zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
+ __m512i _mm512_cvtpd_epu64 (__m512d a)
+ VCVTPD2UQQ zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -621,7 +690,10 @@
To be added.
- __m512i _mm512_cvttps_epu64 (__m512 a) VCVTTPS2UQQ zmm1 {k1}{z}, ymm2/m256/m32bcst{er}
+
+ __m512i _mm512_cvttps_epu64 (__m256 a)
+ VCVTTPS2UQQ zmm1 {k1}{z}, ymm2/m256/m32bcst{sae}
+
To be added.
To be added.
@@ -646,7 +718,10 @@
To be added.
- __m512i _mm512_cvttpd_epu64 (__m512d a) VCVTTPD2UQQ zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
+ __m512i _mm512_cvttpd_epu64 (__m512d a)
+ VCVTTPD2UQQ zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
To be added.
To be added.
@@ -680,7 +755,10 @@
To be added.
To be added.
- __m128d _mm512_extractf64x2_pd (__m512d a, const int imm8) VEXTRACTF64x2 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128d _mm512_extractf64x2_pd (__m512d a, const int imm8)
+ VEXTRACTF64x2 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -714,7 +792,10 @@
To be added.
To be added.
- __m128i _mm512_extracti64x2_epi64 (__m512i a, const int imm8) VEXTRACTI64x2 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128i _mm512_extracti64x2_epi64 (__m512i a, const int imm8)
+ VEXTRACTI64x2 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -748,7 +829,10 @@
To be added.
To be added.
- __m128i _mm512_extracti64x2_epi64 (__m512i a, const int imm8) VEXTRACTI64x2 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128i _mm512_extracti64x2_epi64 (__m512i a, const int imm8)
+ VEXTRACTI64x2 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -782,7 +866,10 @@
To be added.
To be added.
- __m256i _mm512_extracti32x8_epi32 (__m512i a, const int imm8) VEXTRACTI32x8 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256i _mm512_extracti32x8_epi32 (__m512i a, const int imm8)
+ VEXTRACTI32x8 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -816,7 +903,10 @@
To be added.
To be added.
- __m256 _mm512_extractf32x8_ps (__m512 a, const int imm8) VEXTRACTF32x8 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256 _mm512_extractf32x8_ps (__m512 a, const int imm8)
+ VEXTRACTF32x8 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -850,7 +940,10 @@
To be added.
To be added.
- __m256i _mm512_extracti32x8_epi32 (__m512i a, const int imm8) VEXTRACTI32x8 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256i _mm512_extracti32x8_epi32 (__m512i a, const int imm8)
+ VEXTRACTI32x8 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -886,7 +979,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_insertf64x2_pd (__m512d a, __m128d b, int imm8) VINSERTF64x2 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512d _mm512_insertf64x2_pd (__m512d a, __m128d b, int imm8)
+ VINSERTF64x2 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -922,7 +1018,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti64x2_si512 (__m512i a, __m128i b, const int imm8) VINSERTI64x2 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512i _mm512_inserti64x2_si512 (__m512i a, __m128i b, const int imm8)
+ VINSERTI64x2 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -958,7 +1057,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti64x2_si512 (__m512i a, __m128i b, const int imm8) VINSERTI64x2 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512i _mm512_inserti64x2_si512 (__m512i a, __m128i b, const int imm8)
+ VINSERTI64x2 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -994,7 +1096,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti32x8_si512 (__m512i a, __m256i b, const int imm8) VINSERTI32x8 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512i _mm512_inserti32x8_si512 (__m512i a, __m256i b, const int imm8)
+ VINSERTI32x8 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -1030,7 +1135,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_insertf32x8_ps (__m512 a, __m256 b, int imm8) VINSERTF32x8 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512 _mm512_insertf32x8_ps (__m512 a, __m256 b, int imm8)
+ VINSERTF32x8 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -1066,7 +1174,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti32x8_si512 (__m512i a, __m256i b, const int imm8) VINSERTI32x8 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512i _mm512_inserti32x8_si512 (__m512i a, __m256i b, const int imm8)
+ VINSERTI32x8 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -1114,7 +1225,10 @@
To be added.
To be added.
- __m512i _mm512_mullo_epi64 (__m512i a, __m512i b) VPMULLQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_mullo_epi64 (__m512i a, __m512i b)
+ VPMULLQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1141,7 +1255,10 @@
To be added.
To be added.
- __m512i _mm512_mullo_epi64 (__m512i a, __m512i b) VPMULLQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_mullo_epi64 (__m512i a, __m512i b)
+ VPMULLQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1168,7 +1285,10 @@
To be added.
To be added.
- __m512d _mm512_or_pd (__m512d a, __m512d b) VORPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_or_pd (__m512d a, __m512d b)
+ VORPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1195,7 +1315,10 @@
To be added.
To be added.
- __m512 _mm512_or_ps (__m512 a, __m512 b) VORPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_or_ps (__m512 a, __m512 b)
+ VORPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -1231,7 +1354,9 @@
To be added.
To be added.
To be added.
- __m512d _mm512_range_pd(__m512d a, __m512d b, int imm); VRANGEPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}, imm8
+
+ __m512d _mm512_range_pd(__m512d a, __m512d b, int imm);
+ VRANGEPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}, imm8
+
To be added.
To be added.
@@ -1267,7 +1392,9 @@
To be added.
To be added.
To be added.
- __m512 _mm512_range_ps(__m512 a, __m512 b, int imm); VRANGEPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}, imm8
+
+ __m512 _mm512_range_ps(__m512 a, __m512 b, int imm);
+ VRANGEPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}, imm8
+
To be added.
To be added.
@@ -1303,7 +1430,9 @@
To be added.
To be added.
To be added.
- __m128d _mm_range_sd(__m128d a, __m128d b, int imm); VRANGESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
+ __m128d _mm_range_sd(__m128d a, __m128d b, int imm);
+ VRANGESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
To be added.
To be added.
@@ -1339,7 +1468,9 @@
To be added.
To be added.
To be added.
- __m128 _mm_range_ss(__m128 a, __m128 b, int imm); VRANGESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
+ __m128 _mm_range_ss(__m128 a, __m128 b, int imm);
+ VRANGESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
To be added.
To be added.
@@ -1373,7 +1504,9 @@
To be added.
To be added.
- __m512d _mm512_reduce_pd(__m512d a, int imm); VREDUCEPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}, imm8
+
+ __m512d _mm512_reduce_pd(__m512d a, int imm);
+ VREDUCEPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}, imm8
+
To be added.
To be added.
@@ -1407,7 +1540,9 @@
To be added.
To be added.
- __m512 _mm512_reduce_ps(__m512 a, int imm); VREDUCEPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}, imm8
+
+ __m512 _mm512_reduce_ps(__m512 a, int imm);
+ VREDUCEPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}, imm8
+
To be added.
To be added.
@@ -1441,7 +1576,9 @@
To be added.
To be added.
- __m128d _mm_reduce_sd(__m128d a, int imm); VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
+ __m128d _mm_reduce_sd(__m128d a, int imm);
+ VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
To be added.
To be added.
@@ -1475,7 +1612,9 @@
To be added.
To be added.
- __m128 _mm_reduce_ss(__m128 a, int imm); VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
+ __m128 _mm_reduce_ss(__m128 a, int imm);
+ VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
To be added.
To be added.
@@ -1511,7 +1650,9 @@
To be added.
To be added.
To be added.
- __m128d _mm_reduce_sd(__m128d a, __m128d b, int imm); VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128d _mm_reduce_sd(__m128d a, __m128d b, int imm);
+ VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -1547,7 +1688,9 @@
To be added.
To be added.
To be added.
- __m128 _mm_reduce_ss(__m128 a, __m128 b, int imm); VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128 _mm_reduce_ss(__m128 a, __m128 b, int imm);
+ VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -1574,7 +1717,10 @@
To be added.
To be added.
- __m512d _mm512_xor_pd (__m512d a, __m512d b) VXORPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_xor_pd (__m512d a, __m512d b)
+ VXORPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1601,7 +1747,10 @@
To be added.
To be added.
- __m512 _mm512_xor_ps (__m512 a, __m512 b) VXORPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_xor_ps (__m512 a, __m512 b)
+ VXORPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
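A hedged usage sketch for the VPMULLQ surface above, assuming the .NET 8 Avx512DQ API shape; the low 64 bits of each lane product are kept, which plain AVX2 cannot do in a single instruction:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    static Vector512<long> MulLow(Vector512<long> a, Vector512<long> b)
        => Avx512DQ.IsSupported
            ? Avx512DQ.MultiplyLow(a, b)
            : throw new PlatformNotSupportedException();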
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512F+VL.xml b/xml/System.Runtime.Intrinsics.X86/Avx512F+VL.xml
index 0cb78049fc8..9791d754d3c 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512F+VL.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512F+VL.xml
@@ -38,7 +38,10 @@
To be added.
- __m128i _mm_abs_epi64 (__m128i a) VPABSQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_abs_epi64 (__m128i a)
+ VPABSQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -63,7 +66,10 @@
To be added.
- __m256i _mm256_abs_epi64 (__m128i a) VPABSQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256i _mm256_abs_epi64 (__m256i a)
+ VPABSQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -99,7 +105,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count) VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
+ __m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count)
+ VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -135,7 +144,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count) VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
+ __m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count)
+ VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -171,7 +183,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count) VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
+ __m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count)
+ VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -207,7 +222,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count) VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
+ __m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count)
+ VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -243,7 +261,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count) VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
+ __m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count)
+ VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -279,7 +300,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count) VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
+ __m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count)
+ VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -315,7 +339,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count) VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
+ __m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count)
+ VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -351,7 +378,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count) VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
+ __m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count)
+ VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -378,7 +408,10 @@
To be added.
To be added.
- __m128i _mm_cmpgt_epu32 (__m128i a, __m128i b) VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(6)
+
+ __m128i _mm_cmpgt_epu32 (__m128i a, __m128i b)
+ VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(6)
+
To be added.
To be added.
@@ -405,7 +438,10 @@
To be added.
To be added.
- __m128i _mm_cmpgt_epu64 (__m128i a, __m128i b) VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(6)
+
+ __m128i _mm_cmpgt_epu64 (__m128i a, __m128i b)
+ VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(6)
+
To be added.
To be added.
@@ -432,7 +468,10 @@
To be added.
To be added.
- __m256i _mm256_cmpgt_epu32 (__m256i a, __m256i b) VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(6)
+
+ __m256i _mm256_cmpgt_epu32 (__m256i a, __m256i b)
+ VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(6)
+
To be added.
To be added.
@@ -459,7 +498,10 @@
To be added.
To be added.
- __m256i _mm256_cmpgt_epu64 (__m256i a, __m256i b) VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(6)
+
+ __m256i _mm256_cmpgt_epu64 (__m256i a, __m256i b)
+ VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(6)
+
To be added.
To be added.
@@ -486,7 +528,10 @@
To be added.
To be added.
- __m128i _mm_cmpge_epi32 (__m128i a, __m128i b) VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)
+
+ __m128i _mm_cmpge_epi32 (__m128i a, __m128i b)
+ VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)
+
To be added.
To be added.
@@ -513,7 +558,10 @@
To be added.
To be added.
- __m128i _mm_cmpge_epi64 (__m128i a, __m128i b) VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)
+
+ __m128i _mm_cmpge_epi64 (__m128i a, __m128i b)
+ VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)
+
To be added.
To be added.
@@ -540,7 +588,10 @@
To be added.
To be added.
- __m128i _mm_cmpge_epu32 (__m128i a, __m128i b) VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)
+
+ __m128i _mm_cmpge_epu32 (__m128i a, __m128i b)
+ VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)
+
To be added.
To be added.
@@ -567,7 +618,10 @@
To be added.
To be added.
- __m128i _mm_cmpge_epu64 (__m128i a, __m128i b) VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)
+
+ __m128i _mm_cmpge_epu64 (__m128i a, __m128i b)
+ VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)
+
To be added.
To be added.
@@ -594,7 +648,10 @@
To be added.
To be added.
- __m256i _mm256_cmpge_epi32 (__m256i a, __m256i b) VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)
+
+ __m256i _mm256_cmpge_epi32 (__m256i a, __m256i b)
+ VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)
+
To be added.
To be added.
@@ -621,7 +678,10 @@
To be added.
To be added.
- __m256i _mm256_cmpge_epi64 (__m256i a, __m256i b) VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)
+
+ __m256i _mm256_cmpge_epi64 (__m256i a, __m256i b)
+ VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)
+
To be added.
To be added.
@@ -648,7 +708,10 @@
To be added.
To be added.
- __m256i _mm256_cmpge_epu32 (__m256i a, __m256i b) VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)
+
+ __m256i _mm256_cmpge_epu32 (__m256i a, __m256i b)
+ VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)
+
To be added.
To be added.
@@ -675,7 +738,10 @@
To be added.
To be added.
- __m256i _mm256_cmpge_epu64 (__m256i a, __m256i b) VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)
+
+ __m256i _mm256_cmpge_epu64 (__m256i a, __m256i b)
+ VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)
+
To be added.
To be added.
@@ -702,7 +768,10 @@
To be added.
To be added.
- __m128i _mm_cmplt_epi32 (__m128i a, __m128i b) VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)
+
+ __m128i _mm_cmplt_epi32 (__m128i a, __m128i b)
+ VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)
+
To be added.
To be added.
@@ -729,7 +798,10 @@
To be added.
To be added.
- __m128i _mm_cmplt_epi64 (__m128i a, __m128i b) VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)
+
+ __m128i _mm_cmplt_epi64 (__m128i a, __m128i b)
+ VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)
+
To be added.
To be added.
@@ -756,7 +828,10 @@
To be added.
To be added.
- __m128i _mm_cmplt_epu32 (__m128i a, __m128i b) VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)
+
+ __m128i _mm_cmplt_epu32 (__m128i a, __m128i b)
+ VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)
+
To be added.
To be added.
@@ -783,7 +858,10 @@
To be added.
To be added.
- __m128i _mm_cmplt_epu64 (__m128i a, __m128i b) VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)
+
+ __m128i _mm_cmplt_epu64 (__m128i a, __m128i b)
+ VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)
+
To be added.
To be added.
@@ -810,7 +888,10 @@
To be added.
To be added.
- __m256i _mm256_cmplt_epi32 (__m256i a, __m256i b) VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)
+
+ __m256i _mm256_cmplt_epi32 (__m256i a, __m256i b)
+ VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)
+
To be added.
To be added.
@@ -837,7 +918,10 @@
To be added.
To be added.
- __m256i _mm256_cmplt_epi64 (__m256i a, __m256i b) VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)
+
+ __m256i _mm256_cmplt_epi64 (__m256i a, __m256i b)
+ VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)
+
To be added.
To be added.
@@ -864,7 +948,10 @@
To be added.
To be added.
- __m256i _mm256_cmplt_epu32 (__m256i a, __m256i b) VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)
+
+ __m256i _mm256_cmplt_epu32 (__m256i a, __m256i b)
+ VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)
+
To be added.
To be added.
@@ -891,7 +978,10 @@
To be added.
To be added.
- __m256i _mm256_cmplt_epu64 (__m256i a, __m256i b) VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)
+
+ __m256i _mm256_cmplt_epu64 (__m256i a, __m256i b)
+ VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)
+
To be added.
To be added.
@@ -918,7 +1008,10 @@
To be added.
To be added.
- __m128i _mm_cmple_epi32 (__m128i a, __m128i b) VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)
+
+ __m128i _mm_cmple_epi32 (__m128i a, __m128i b)
+ VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)
+
To be added.
To be added.
@@ -945,7 +1038,10 @@
To be added.
To be added.
- __m128i _mm_cmple_epi64 (__m128i a, __m128i b) VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)
+
+ __m128i _mm_cmple_epi64 (__m128i a, __m128i b)
+ VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)
+
To be added.
To be added.
@@ -972,7 +1068,10 @@
To be added.
To be added.
- __m128i _mm_cmple_epu32 (__m128i a, __m128i b) VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)
+
+ __m128i _mm_cmple_epu32 (__m128i a, __m128i b)
+ VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)
+
To be added.
To be added.
@@ -999,7 +1098,10 @@
To be added.
To be added.
- __m128i _mm_cmple_epu64 (__m128i a, __m128i b) VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)
+
+ __m128i _mm_cmple_epu64 (__m128i a, __m128i b)
+ VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)
+
To be added.
To be added.
@@ -1026,7 +1128,10 @@
To be added.
To be added.
- __m256i _mm256_cmple_epi32 (__m256i a, __m256i b) VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)
+
+ __m256i _mm256_cmple_epi32 (__m256i a, __m256i b)
+ VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)
+
To be added.
To be added.
@@ -1053,7 +1158,10 @@
To be added.
To be added.
- __m256i _mm256_cmple_epi64 (__m256i a, __m256i b) VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)
+
+ __m256i _mm256_cmple_epi64 (__m256i a, __m256i b)
+ VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)
+
To be added.
To be added.
@@ -1080,7 +1188,10 @@
To be added.
To be added.
- __m256i _mm256_cmple_epu32 (__m256i a, __m256i b) VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)
+
+ __m256i _mm256_cmple_epu32 (__m256i a, __m256i b)
+ VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)
+
To be added.
To be added.
@@ -1107,7 +1218,10 @@
To be added.
To be added.
- __m256i _mm256_cmple_epu64 (__m256i a, __m256i b) VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)
+
+ __m256i _mm256_cmple_epu64 (__m256i a, __m256i b)
+ VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)
+
To be added.
To be added.
@@ -1134,7 +1248,10 @@
To be added.
To be added.
- __m128i _mm_cmpne_epi32 (__m128i a, __m128i b) VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)
+
+ __m128i _mm_cmpne_epi32 (__m128i a, __m128i b)
+ VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)
+
To be added.
To be added.
@@ -1161,7 +1278,10 @@
To be added.
To be added.
- __m128i _mm_cmpne_epi64 (__m128i a, __m128i b) VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)
+
+ __m128i _mm_cmpne_epi64 (__m128i a, __m128i b)
+ VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)
+
To be added.
To be added.
@@ -1188,7 +1308,10 @@
To be added.
To be added.
- __m128i _mm_cmpne_epu32 (__m128i a, __m128i b) VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)
+
+ __m128i _mm_cmpne_epu32 (__m128i a, __m128i b)
+ VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)
+
To be added.
To be added.
@@ -1215,7 +1338,10 @@
To be added.
To be added.
- __m128i _mm_cmpne_epu64 (__m128i a, __m128i b) VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)
+
+ __m128i _mm_cmpne_epu64 (__m128i a, __m128i b)
+ VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)
+
To be added.
To be added.
@@ -1242,7 +1368,10 @@
To be added.
To be added.
- __m256i _mm256_cmpne_epi32 (__m256i a, __m256i b) VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)
+
+ __m256i _mm256_cmpne_epi32 (__m256i a, __m256i b)
+ VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)
+
To be added.
To be added.
@@ -1269,7 +1398,10 @@
To be added.
To be added.
- __m256i _mm256_cmpne_epi64 (__m256i a, __m256i b) VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)
+
+ __m256i _mm256_cmpne_epi64 (__m256i a, __m256i b)
+ VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)
+
To be added.
To be added.
@@ -1296,7 +1428,10 @@
To be added.
To be added.
- __m256i _mm256_cmpne_epu32 (__m256i a, __m256i b) VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)
+
+ __m256i _mm256_cmpne_epu32 (__m256i a, __m256i b)
+ VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)
+
To be added.
To be added.
@@ -1323,7 +1458,10 @@
To be added.
To be added.
- __m256i _mm256_cmpne_epu64 (__m256i a, __m256i b) VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)
+
+ __m256i _mm256_cmpne_epu64 (__m256i a, __m256i b)
+ VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)
+
To be added.
To be added.
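Each comparison entry above lowers to VPCMP{U}D or VPCMP{U}Q with a predicate immediate (1 = less-than, 2 = less-than-or-equal, 4 = not-equal, 5 = greater-than-or-equal) that writes a mask register, which the JIT then widens back into an ordinary vector result. A minimal C# sketch of the managed surface these entries document, assuming .NET 8+ on AVX-512VL hardware:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    if (Avx512F.VL.IsSupported)
    {
        Vector128<int> a = Vector128.Create(1, 5, -3, 7);
        Vector128<int> b = Vector128.Create(2, 5, -4, 9);

        // VPCMPD with imm8(1): all-ones in each lane where a < b
        Vector128<int> lessThan = Avx512F.VL.CompareLessThan(a, b);

        // VPCMPUD with imm8(4): the same lanes compared as unsigned for inequality
        Vector128<uint> notEqual = Avx512F.VL.CompareNotEqual(a.AsUInt32(), b.AsUInt32());
    }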
@@ -1348,7 +1486,10 @@
To be added.
- __m128i _mm_cvtepi32_epi8 (__m128i a) VPMOVDB xmm1/m32 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi32_epi8 (__m128i a)
+ VPMOVDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1373,7 +1514,10 @@
To be added.
- __m128i _mm_cvtepi64_epi8 (__m128i a) VPMOVQB xmm1/m16 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi64_epi8 (__m128i a)
+ VPMOVQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1398,7 +1542,10 @@
To be added.
- __m128i _mm_cvtepi32_epi8 (__m128i a) VPMOVDB xmm1/m32 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi32_epi8 (__m128i a)
+ VPMOVDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1423,7 +1570,10 @@
To be added.
- __m128i _mm_cvtepi64_epi8 (__m128i a) VPMOVQB xmm1/m16 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi64_epi8 (__m128i a)
+ VPMOVQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1448,7 +1598,10 @@
To be added.
- __m128i _mm256_cvtepi32_epi8 (__m256i a) VPMOVDB xmm1/m64 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi32_epi8 (__m256i a)
+ VPMOVDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1473,7 +1626,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi8 (__m256i a) VPMOVQB xmm1/m32 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi64_epi8 (__m256i a)
+ VPMOVQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1498,7 +1654,10 @@
To be added.
- __m128i _mm256_cvtepi32_epi8 (__m256i a) VPMOVDB xmm1/m64 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi32_epi8 (__m256i a)
+ VPMOVDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1523,7 +1682,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi8 (__m256i a) VPMOVQB xmm1/m32 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi64_epi8 (__m256i a)
+ VPMOVQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1548,7 +1710,10 @@
To be added.
- __m128i _mm_cvtusepi32_epi8 (__m128i a) VPMOVUSDB xmm1/m32 {k1}{z}, xmm2
+
+ __m128i _mm_cvtusepi32_epi8 (__m128i a)
+ VPMOVUSDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1573,7 +1738,10 @@
To be added.
- __m128i _mm_cvtusepi64_epi8 (__m128i a) VPMOVUSQB xmm1/m16 {k1}{z}, xmm2
+
+ __m128i _mm_cvtusepi64_epi8 (__m128i a)
+ VPMOVUSQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1598,7 +1766,10 @@
To be added.
- __m128i _mm256_cvtusepi32_epi8 (__m256i a) VPMOVUSDB xmm1/m64 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtusepi32_epi8 (__m256i a)
+ VPMOVUSDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1623,7 +1794,10 @@
To be added.
- __m128i _mm256_cvtusepi64_epi8 (__m256i a) VPMOVUSQB xmm1/m32 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtusepi64_epi8 (__m256i a)
+ VPMOVUSQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1648,7 +1822,10 @@
To be added.
- __m128d _mm_cvtepu32_pd (__m128i a) VCVTUDQ2PD xmm1 {k1}{z}, xmm2/m64/m32bcst
+
+ __m128d _mm_cvtepu32_pd (__m128i a)
+ VCVTUDQ2PD xmm1 {k1}{z}, xmm2/m64/m32bcst
+
To be added.
To be added.
@@ -1673,7 +1850,10 @@
To be added.
- __m128i _mm_cvtepi32_epi16 (__m128i a) VPMOVDW xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm_cvtepi32_epi16 (__m128i a)
+ VPMOVDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1698,7 +1878,10 @@
To be added.
- __m128i _mm_cvtepi64_epi16 (__m128i a) VPMOVQW xmm1/m32 {k1}{z}, zmm2
+
+ __m128i _mm_cvtepi64_epi16 (__m128i a)
+ VPMOVQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1723,7 +1906,10 @@
To be added.
- __m128i _mm_cvtepi32_epi16 (__m128i a) VPMOVDW xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm_cvtepi32_epi16 (__m128i a)
+ VPMOVDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1748,7 +1934,10 @@
To be added.
- __m128i _mm_cvtepi64_epi16 (__m128i a) VPMOVQW xmm1/m32 {k1}{z}, zmm2
+
+ __m128i _mm_cvtepi64_epi16 (__m128i a)
+ VPMOVQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1773,7 +1962,10 @@
To be added.
- __m128i _mm256_cvtepi32_epi16 (__m256i a) VPMOVDW xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtepi32_epi16 (__m256i a)
+ VPMOVDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1798,7 +1990,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi16 (__m256i a) VPMOVQW xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtepi64_epi16 (__m256i a)
+ VPMOVQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1823,7 +2018,10 @@
To be added.
- __m128i _mm256_cvtepi32_epi16 (__m256i a) VPMOVDW xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtepi32_epi16 (__m256i a)
+ VPMOVDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1848,7 +2046,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi16 (__m256i a) VPMOVQW xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtepi64_epi16 (__m256i a)
+ VPMOVQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1873,7 +2074,10 @@
To be added.
- __m128i _mm_cvtsepi32_epi16 (__m128i a) VPMOVSDW xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm_cvtsepi32_epi16 (__m128i a)
+ VPMOVSDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1898,7 +2102,10 @@
To be added.
- __m128i _mm_cvtsepi64_epi16 (__m128i a) VPMOVSQW xmm1/m32 {k1}{z}, zmm2
+
+ __m128i _mm_cvtsepi64_epi16 (__m128i a)
+ VPMOVSQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1923,7 +2130,10 @@
To be added.
- __m128i _mm256_cvtsepi32_epi16 (__m256i a) VPMOVSDW xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtsepi32_epi16 (__m256i a)
+ VPMOVSDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1948,7 +2158,10 @@
To be added.
- __m128i _mm256_cvtsepi64_epi16 (__m256i a) VPMOVSQW xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtsepi64_epi16 (__m256i a)
+ VPMOVSQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -1973,7 +2186,10 @@
To be added.
- __m128i _mm_cvtepi64_epi32 (__m128i a) VPMOVQD xmm1/m64 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi64_epi32 (__m128i a)
+ VPMOVQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -1998,7 +2214,10 @@
To be added.
- __m128i _mm_cvtepi64_epi32 (__m128i a) VPMOVQD xmm1/m64 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi64_epi32 (__m128i a)
+ VPMOVQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2023,7 +2242,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi32 (__m256i a) VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi64_epi32 (__m256i a)
+ VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2048,7 +2270,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi32 (__m256i a) VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi64_epi32 (__m256i a)
+ VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2073,7 +2298,10 @@
To be added.
- __m128i _mm_cvtsepi64_epi32 (__m128i a) VPMOVSQD xmm1/m64 {k1}{z}, xmm2
+
+ __m128i _mm_cvtsepi64_epi32 (__m128i a)
+ VPMOVSQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2098,7 +2326,10 @@
To be added.
- __m128i _mm256_cvtsepi64_epi32 (__m256i a) VPMOVSQD xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtsepi64_epi32 (__m256i a)
+ VPMOVSQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2123,7 +2354,10 @@
To be added.
- __m128i _mm_cvtepi32_epi8 (__m128i a) VPMOVDB xmm1/m32 {k1}{z}, zmm2
+
+ __m128i _mm_cvtepi32_epi8 (__m128i a)
+ VPMOVDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2148,7 +2382,10 @@
To be added.
- __m128i _mm_cvtepi64_epi8 (__m128i a) VPMOVQB xmm1/m16 {k1}{z}, zmm2
+
+ __m128i _mm_cvtepi64_epi8 (__m128i a)
+ VPMOVQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2173,7 +2410,10 @@
To be added.
- __m128i _mm_cvtepi32_epi8 (__m128i a) VPMOVDB xmm1/m32 {k1}{z}, zmm2
+
+ __m128i _mm_cvtepi32_epi8 (__m128i a)
+ VPMOVDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2198,7 +2438,10 @@
To be added.
- __m128i _mm_cvtepi64_epi8 (__m128i a) VPMOVQB xmm1/m16 {k1}{z}, zmm2
+
+ __m128i _mm_cvtepi64_epi8 (__m128i a)
+ VPMOVQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2223,7 +2466,10 @@
To be added.
- __m128i _mm256_cvtepi32_epi8 (__m256i a) VPMOVDB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtepi32_epi8 (__m256i a)
+ VPMOVDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2248,7 +2494,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi8 (__m256i a) VPMOVQB xmm1/m32 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtepi64_epi8 (__m256i a)
+ VPMOVQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2273,7 +2522,10 @@
To be added.
- __m128i _mm256_cvtepi32_epi8 (__m256i a) VPMOVDB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtepi32_epi8 (__m256i a)
+ VPMOVDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2298,7 +2550,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi8 (__m256i a) VPMOVQB xmm1/m32 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtepi64_epi8 (__m256i a)
+ VPMOVQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2323,7 +2578,10 @@
To be added.
- __m128i _mm_cvtsepi32_epi8 (__m128i a) VPMOVSDB xmm1/m32 {k1}{z}, zmm2
+
+ __m128i _mm_cvtsepi32_epi8 (__m128i a)
+ VPMOVSDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2348,7 +2606,10 @@
To be added.
- __m128i _mm_cvtsepi64_epi8 (__m128i a) VPMOVSQB xmm1/m16 {k1}{z}, zmm2
+
+ __m128i _mm_cvtsepi64_epi8 (__m128i a)
+ VPMOVSQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2373,7 +2634,10 @@
To be added.
- __m128i _mm256_cvtsepi32_epi8 (__m256i a) VPMOVSDB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtsepi32_epi8 (__m256i a)
+ VPMOVSDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2398,7 +2662,10 @@
To be added.
- __m128i _mm256_cvtsepi64_epi8 (__m256i a) VPMOVSQB xmm1/m32 {k1}{z}, zmm2
+
+ __m128i _mm256_cvtsepi64_epi8 (__m256i a)
+ VPMOVSQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2423,7 +2690,10 @@
To be added.
- __m128 _mm_cvtepu32_ps (__m128i a) VCVTUDQ2PS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128 _mm_cvtepu32_ps (__m128i a)
+ VCVTUDQ2PS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -2448,7 +2718,10 @@
To be added.
- __m128i _mm_cvtepi32_epi16 (__m128i a) VPMOVDW xmm1/m64 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi32_epi16 (__m128i a)
+ VPMOVDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2473,7 +2746,10 @@
To be added.
- __m128i _mm_cvtepi64_epi16 (__m128i a) VPMOVQW xmm1/m32 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi64_epi16 (__m128i a)
+ VPMOVQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2498,7 +2774,10 @@
To be added.
- __m128i _mm_cvtepi32_epi16 (__m128i a) VPMOVDW xmm1/m64 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi32_epi16 (__m128i a)
+ VPMOVDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2523,7 +2802,10 @@
To be added.
- __m128i _mm_cvtepi64_epi16 (__m128i a) VPMOVQW xmm1/m32 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi64_epi16 (__m128i a)
+ VPMOVQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2548,7 +2830,10 @@
To be added.
- __m128i _mm256_cvtepi32_epi16 (__m256i a) VPMOVDW xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi32_epi16 (__m256i a)
+ VPMOVDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2573,7 +2858,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi16 (__m256i a) VPMOVQW xmm1/m64 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi64_epi16 (__m256i a)
+ VPMOVQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2598,7 +2886,10 @@
To be added.
- __m128i _mm256_cvtepi32_epi16 (__m256i a) VPMOVDW xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi32_epi16 (__m256i a)
+ VPMOVDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2623,7 +2914,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi16 (__m256i a) VPMOVQW xmm1/m64 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi64_epi16 (__m256i a)
+ VPMOVQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2648,7 +2942,10 @@
To be added.
- __m128i _mm_cvtusepi32_epi16 (__m128i a) VPMOVUSDW xmm1/m64 {k1}{z}, xmm2
+
+ __m128i _mm_cvtusepi32_epi16 (__m128i a)
+ VPMOVUSDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2673,7 +2970,10 @@
To be added.
- __m128i _mm_cvtusepi64_epi16 (__m128i a) VPMOVUSQW xmm1/m32 {k1}{z}, xmm2
+
+ __m128i _mm_cvtusepi64_epi16 (__m128i a)
+ VPMOVUSQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2698,7 +2998,10 @@
To be added.
- __m128i _mm256_cvtusepi32_epi16 (__m256i a) VPMOVUSDW xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtusepi32_epi16 (__m256i a)
+ VPMOVUSDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2723,7 +3026,10 @@
To be added.
- __m128i _mm256_cvtusepi64_epi16 (__m256i a) VPMOVUSQW xmm1/m64 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtusepi64_epi16 (__m256i a)
+ VPMOVUSQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2748,7 +3054,10 @@
To be added.
- __m128i _mm_cvtpd_epu32 (__m128d a) VCVTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_cvtpd_epu32 (__m128d a)
+ VCVTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -2773,7 +3082,10 @@
To be added.
- __m128i _mm_cvtepi64_epi32 (__m128i a) VPMOVQD xmm1/m128 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi64_epi32 (__m128i a)
+ VPMOVQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2798,7 +3110,10 @@
To be added.
- __m128i _mm_cvtps_epu32 (__m128 a) VCVTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128i _mm_cvtps_epu32 (__m128 a)
+ VCVTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -2823,7 +3138,10 @@
To be added.
- __m128i _mm_cvtepi64_epi32 (__m128i a) VPMOVQD xmm1/m128 {k1}{z}, xmm2
+
+ __m128i _mm_cvtepi64_epi32 (__m128i a)
+ VPMOVQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2848,7 +3166,10 @@
To be added.
- __m128i _mm256_cvtpd_epu32 (__m256d a) VCVTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m128i _mm256_cvtpd_epu32 (__m256d a)
+ VCVTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -2873,7 +3194,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi32 (__m256i a) VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi64_epi32 (__m256i a)
+ VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2898,7 +3222,10 @@
To be added.
- __m128i _mm256_cvtepi64_epi32 (__m256i a) VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtepi64_epi32 (__m256i a)
+ VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2923,7 +3250,10 @@
To be added.
- __m128i _mm_cvtusepi64_epi32 (__m128i a) VPMOVUSQD xmm1/m128 {k1}{z}, xmm2
+
+ __m128i _mm_cvtusepi64_epi32 (__m128i a)
+ VPMOVUSQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2948,7 +3278,10 @@
To be added.
- __m128i _mm256_cvtusepi64_epi32 (__m256i a) VPMOVUSQD xmm1/m128 {k1}{z}, ymm2
+
+ __m128i _mm256_cvtusepi64_epi32 (__m256i a)
+ VPMOVUSQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -2973,7 +3306,10 @@
To be added.
- __m128i _mm_cvttpd_epu32 (__m128d a) VCVTTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128i _mm_cvttpd_epu32 (__m128d a)
+ VCVTTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -2998,7 +3334,10 @@
To be added.
- __m128i _mm_cvttps_epu32 (__m128 a) VCVTTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128i _mm_cvttps_epu32 (__m128 a)
+ VCVTTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -3023,7 +3362,10 @@
To be added.
- __m128i _mm256_cvttpd_epu32 (__m256d a) VCVTTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m128i _mm256_cvttpd_epu32 (__m256d a)
+ VCVTTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -3048,7 +3390,10 @@
To be added.
- __m256d _mm512_cvtepu32_pd (__m128i a) VCVTUDQ2PD ymm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m256d _mm256_cvtepu32_pd (__m128i a)
+ VCVTUDQ2PD ymm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -3073,7 +3418,10 @@
To be added.
- __m256 _mm256_cvtepu32_ps (__m256i a) VCVTUDQ2PS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256 _mm256_cvtepu32_ps (__m256i a)
+ VCVTUDQ2PS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -3098,7 +3446,10 @@
To be added.
- __m256i _mm256_cvtps_epu32 (__m256 a) VCVTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256i _mm256_cvtps_epu32 (__m256 a)
+ VCVTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -3123,7 +3474,10 @@
To be added.
- __m256i _mm256_cvttps_epu32 (__m256 a) VCVTTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256i _mm256_cvttps_epu32 (__m256 a)
+ VCVTTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
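The VPMOV* entries above narrow each element, either by plain truncation or, in the S/US forms, with signed/unsigned saturation. A hedged sketch of the difference, assuming .NET 8+:

    if (Avx512F.VL.IsSupported)
    {
        Vector256<long> src = Vector256.Create(1L, 300L, -2L, long.MaxValue);

        // VPMOVQD: each 64-bit lane keeps only its low 32 bits
        Vector128<int> truncated = Avx512F.VL.ConvertToVector128Int32(src);

        // VPMOVSQD: out-of-range lanes clamp to int.MinValue/int.MaxValue instead
        Vector128<int> saturated = Avx512F.VL.ConvertToVector128Int32WithSaturation(src);
    }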
@@ -3161,7 +3515,9 @@
To be added.
To be added.
To be added.
- __m128d _mm_fixupimm_pd(__m128d a, __m128d b, __m128i tbl, int imm); VFIXUPIMMPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
+ __m128d _mm_fixupimm_pd(__m128d a, __m128d b, __m128i tbl, int imm); VFIXUPIMMPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -3199,7 +3555,9 @@
To be added.
To be added.
To be added.
- __m128 _mm_fixupimm_ps(__m128 a, __m128 b, __m128i tbl, int imm); VFIXUPIMMPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
+ __m128 _mm_fixupimm_ps(__m128 a, __m128 b, __m128i tbl, int imm); VFIXUPIMMPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -3237,7 +3595,9 @@
To be added.
To be added.
To be added.
- __m256d _mm256_fixupimm_pd(__m256d a, __m256d b, __m256i tbl, int imm); VFIXUPIMMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
+ __m256d _mm256_fixupimm_pd(__m256d a, __m256d b, __m256i tbl, int imm); VFIXUPIMMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -3275,7 +3635,9 @@
To be added.
To be added.
To be added.
- __m256 _mm256_fixupimm_ps(__m256 a, __m256 b, __m256i tbl, int imm); VFIXUPIMMPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
+ __m256 _mm256_fixupimm_ps(__m256 a, __m256 b, __m256i tbl, int imm); VFIXUPIMMPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -3300,7 +3662,10 @@
To be added.
- __m128d _mm_getexp_pd (__m128d a) VGETEXPPD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128d _mm_getexp_pd (__m128d a)
+ VGETEXPPD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -3325,7 +3690,10 @@
To be added.
- __m128 _mm_getexp_ps (__m128 a) VGETEXPPS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128 _mm_getexp_ps (__m128 a)
+ VGETEXPPS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -3350,7 +3718,10 @@
To be added.
- __m256d _mm256_getexp_pd (__m256d a) VGETEXPPD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256d _mm256_getexp_pd (__m256d a)
+ VGETEXPPD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -3375,7 +3746,10 @@
To be added.
- __m256 _mm256_getexp_ps (__m256 a) VGETEXPPS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256 _mm256_getexp_ps (__m256 a)
+ VGETEXPPS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -3409,7 +3783,10 @@
To be added.
To be added.
- __m128d _mm_getmant_pd (__m128d a) VGETMANTPD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128d _mm_getmant_pd (__m128d a)
+ VGETMANTPD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -3443,7 +3820,10 @@
To be added.
To be added.
- __m128 _mm_getmant_ps (__m128 a) VGETMANTPS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128 _mm_getmant_ps (__m128 a)
+ VGETMANTPS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -3477,7 +3857,10 @@
To be added.
To be added.
- __m256d _mm256_getmant_pd (__m256d a) VGETMANTPD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256d _mm256_getmant_pd (__m256d a)
+ VGETMANTPD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -3511,7 +3894,10 @@
To be added.
To be added.
- __m256 _mm256_getmant_ps (__m256 a) VGETMANTPS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256 _mm256_getmant_ps (__m256 a)
+ VGETMANTPS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
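VGETEXP and VGETMANT decompose a float into exponent and mantissa, so that for finite non-zero x, x = GetMantissa(x, 0) * 2^GetExponent(x). An illustrative sketch, assuming .NET 8+ (the control byte 0 selects the [1, 2) normalization interval):

    if (Avx512F.VL.IsSupported)
    {
        Vector128<double> x = Vector128.Create(24.0, 0.75);

        Vector128<double> exp = Avx512F.VL.GetExponent(x);    // (4, -1), i.e. floor(log2|x|)
        Vector128<double> man = Avx512F.VL.GetMantissa(x, 0); // (1.5, 1.5)
    }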
@@ -3559,7 +3945,10 @@
To be added.
To be added.
- __m128i _mm_max_epi64 (__m128i a, __m128i b) VPMAXSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_max_epi64 (__m128i a, __m128i b)
+ VPMAXSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -3586,7 +3975,10 @@
To be added.
To be added.
- __m128i _mm_max_epu64 (__m128i a, __m128i b) VPMAXUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_max_epu64 (__m128i a, __m128i b)
+ VPMAXUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -3613,7 +4005,10 @@
To be added.
To be added.
- __m256i _mm256_max_epi64 (__m256i a, __m256i b) VPMAXSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_max_epi64 (__m256i a, __m256i b)
+ VPMAXSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -3640,7 +4035,10 @@
To be added.
To be added.
- __m256i _mm256_max_epu64 (__m256i a, __m256i b) VPMAXUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_max_epu64 (__m256i a, __m256i b)
+ VPMAXUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -3667,7 +4065,10 @@
To be added.
To be added.
- __m128i _mm_min_epi64 (__m128i a, __m128i b) VPMINSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_min_epi64 (__m128i a, __m128i b)
+ VPMINSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -3694,7 +4095,10 @@
To be added.
To be added.
- __m128i _mm_min_epu64 (__m128i a, __m128i b) VPMINUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_min_epu64 (__m128i a, __m128i b)
+ VPMINUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -3721,7 +4125,10 @@
To be added.
To be added.
- __m256i _mm256_min_epi64 (__m256i a, __m256i b) VPMINSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_min_epi64 (__m256i a, __m256i b)
+ VPMINSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -3748,7 +4155,10 @@
To be added.
To be added.
- __m256i _mm256_min_epu64 (__m256i a, __m256i b) VPMINUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_min_epu64 (__m256i a, __m256i b)
+ VPMINUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
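VPMAXSQ/VPMINUQ and friends fill a long-standing gap: before AVX-512 there was no direct 64-bit integer min/max instruction. Illustrative use, assuming .NET 8+:

    if (Avx512F.VL.IsSupported)
    {
        Vector128<long> a = Vector128.Create(-5L, 9L);
        Vector128<long> b = Vector128.Create(3L, 7L);

        Vector128<long> signedMax = Avx512F.VL.Max(a, b); // VPMAXSQ: (3, 9)

        // VPMINUQ: -5 reinterpreted as ulong is huge, so it loses
        Vector128<ulong> unsignedMin = Avx512F.VL.Min(a.AsUInt64(), b.AsUInt64()); // (3, 7)
    }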
@@ -3777,7 +4187,10 @@
To be added.
To be added.
To be added.
- __m128d _mm_permutex2var_pd (__m128d a, __m128i idx, __m128i b) VPERMI2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128d _mm_permutex2var_pd (__m128d a, __m128i idx, __m128d b)
+ VPERMI2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -3806,7 +4219,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b) VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -3835,7 +4251,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b) VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -3864,7 +4283,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b) VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
+ __m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -3893,7 +4315,10 @@
To be added.
To be added.
To be added.
- __m128 _mm_permutex2var_ps (__m128 a, __m128i idx, __m128i b) VPERMI2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
+ __m128 _mm_permutex2var_ps (__m128 a, __m128i idx, __m128 b)
+ VPERMI2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -3922,7 +4347,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b) VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
+ __m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -3949,7 +4377,10 @@
To be added.
To be added.
- __m256d _mm256_permute4x64_pd (__m256d a, __m256i b) VPERMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256d _mm256_permute4x64_pd (__m256d a, __m256i b)
+ VPERMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -3976,7 +4407,10 @@
To be added.
To be added.
- __m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b) VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b)
+ VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -4003,7 +4437,10 @@
To be added.
To be added.
- __m256i _mm256_permute4x64_pd (__m256d a, __m256i b) VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b)
+ VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -4032,7 +4469,10 @@
To be added.
To be added.
To be added.
- __m256d _mm256_permutex2var_pd (__m256d a, __m256i idx, __m256i b) VPERMI2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256d _mm256_permutex2var_pd (__m256d a, __m256i idx, __m256d b)
+ VPERMI2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -4061,7 +4501,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b) VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -4090,7 +4533,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b) VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -4119,7 +4565,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b) VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
+ __m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -4148,7 +4597,10 @@
To be added.
To be added.
To be added.
- __m256 _mm256_permutex2var_ps (__m256 a, __m256i idx, __m256i b) VPERMI2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
+ __m256 _mm256_permutex2var_ps (__m256 a, __m256i idx, __m256 b)
+ VPERMI2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -4177,7 +4629,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b) VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
+ __m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -4202,7 +4657,10 @@
To be added.
- __m128d _mm_rcp14_pd (__m128d a, __m128d b) VRCP14PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128d _mm_rcp14_pd (__m128d a)
+ VRCP14PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -4227,7 +4685,10 @@
To be added.
- __m128 _mm_rcp14_ps (__m128 a, __m128 b) VRCP14PS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128 _mm_rcp14_ps (__m128 a)
+ VRCP14PS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -4252,7 +4713,10 @@
To be added.
- __m256d _mm256_rcp14_pd (__m256d a, __m256d b) VRCP14PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256d _mm256_rcp14_pd (__m256d a)
+ VRCP14PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -4277,7 +4741,10 @@
To be added.
- __m256 _mm256_rcp14_ps (__m256 a, __m256 b) VRCP14PS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256 _mm256_rcp14_ps (__m256 a)
+ VRCP14PS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -4302,7 +4769,10 @@
To be added.
- __m128d _mm_rsqrt14_pd (__m128d a, __m128d b) VRSQRT14PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
+ __m128d _mm_rsqrt14_pd (__m128d a)
+ VRSQRT14PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -4327,7 +4797,10 @@
To be added.
- __m128 _mm_rsqrt14_ps (__m128 a, __m128 b) VRSQRT14PS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
+ __m128 _mm_rsqrt14_ps (__m128 a)
+ VRSQRT14PS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -4352,7 +4825,10 @@
To be added.
- __m256d _mm256_rsqrt14_pd (__m256d a, __m256d b) VRSQRT14PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
+ __m256d _mm256_rsqrt14_pd (__m256d a)
+ VRSQRT14PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -4377,7 +4853,10 @@
To be added.
- __m256 _mm256_rsqrt14_ps (__m256 a, __m256 b) VRSQRT14PS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m256 _mm256_rsqrt14_ps (__m256 a)
+ VRSQRT14PS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
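VRCP14 and VRSQRT14 are estimate instructions with about 2^-14 relative error, cheaper than a full divide or square root; when more precision is needed they are usually refined with one Newton-Raphson step, as sketched below (assuming .NET 8+):

    if (Avx512F.VL.IsSupported)
    {
        Vector128<float> x = Vector128.Create(2f, 4f, 8f, 16f);

        Vector128<float> rcp = Avx512F.VL.Reciprocal14(x);     // ~1/x
        Vector128<float> rsq = Avx512F.VL.ReciprocalSqrt14(x); // ~1/sqrt(x)

        // Newton-Raphson refinement of the reciprocal: r' = r * (2 - x*r)
        Vector128<float> refined = rcp * (Vector128.Create(2f) - x * rcp);
    }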
@@ -4411,7 +4890,10 @@
To be added.
To be added.
- __m128i _mm_rol_epi32 (__m128i a, int imm8) VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
+ __m128i _mm_rol_epi32 (__m128i a, int imm8)
+ VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -4445,7 +4927,10 @@
To be added.
To be added.
- __m128i _mm_rol_epi64 (__m128i a, int imm8) VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
+ __m128i _mm_rol_epi64 (__m128i a, int imm8)
+ VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -4479,7 +4964,10 @@
To be added.
To be added.
- __m128i _mm_rol_epi32 (__m128i a, int imm8) VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
+ __m128i _mm_rol_epi32 (__m128i a, int imm8)
+ VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -4513,7 +5001,10 @@
To be added.
To be added.
- __m128i _mm_rol_epi64 (__m128i a, int imm8) VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
+ __m128i _mm_rol_epi64 (__m128i a, int imm8)
+ VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -4547,7 +5038,10 @@
To be added.
To be added.
- __m256i _mm256_rol_epi32 (__m256i a, int imm8) VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
+ __m256i _mm256_rol_epi32 (__m256i a, int imm8)
+ VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -4581,7 +5075,10 @@
To be added.
To be added.
- __m256i _mm256_rol_epi64 (__m256i a, int imm8) VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
+ __m256i _mm256_rol_epi64 (__m256i a, int imm8)
+ VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -4615,7 +5112,10 @@
To be added.
To be added.
- __m256i _mm256_rol_epi32 (__m256i a, int imm8) VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
+ __m256i _mm256_rol_epi32 (__m256i a, int imm8)
+ VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -4649,7 +5149,10 @@
To be added.
To be added.
- __m256i _mm256_rol_epi64 (__m256i a, int imm8) VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
+ __m256i _mm256_rol_epi64 (__m256i a, int imm8)
+ VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -4676,7 +5179,10 @@
To be added.
To be added.
- __m128i _mm_rolv_epi32 (__m128i a, __m128i b) VPROLDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
+ __m128i _mm_rolv_epi32 (__m128i a, __m128i b)
+ VPROLVD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -4703,7 +5209,10 @@
To be added.
To be added.
- __m128i _mm_rolv_epi64 (__m128i a, __m128i b) VPROLQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_rolv_epi64 (__m128i a, __m128i b)
+ VPROLVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -4730,7 +5239,10 @@
To be added.
To be added.
- __m128i _mm_rolv_epi32 (__m128i a, __m128i b) VPROLDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
+ __m128i _mm_rolv_epi32 (__m128i a, __m128i b)
+ VPROLVD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -4757,7 +5269,10 @@
To be added.
To be added.
- __m128i _mm_rolv_epi64 (__m128i a, __m128i b) VPROLQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_rolv_epi64 (__m128i a, __m128i b)
+ VPROLVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -4784,7 +5299,10 @@
To be added.
To be added.
- __m256i _mm256_rolv_epi32 (__m256i a, __m256i b) VPROLDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
+ __m256i _mm256_rolv_epi32 (__m256i a, __m256i b)
+ VPROLVD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -4811,7 +5329,10 @@
To be added.
To be added.
- __m256i _mm256_rolv_epi64 (__m256i a, __m256i b) VPROLQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_rolv_epi64 (__m256i a, __m256i b)
+ VPROLVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -4838,7 +5359,10 @@
To be added.
To be added.
- __m256i _mm256_rolv_epi32 (__m256i a, __m256i b) VPROLDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
+ __m256i _mm256_rolv_epi32 (__m256i a, __m256i b)
+ VPROLVD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -4865,7 +5389,10 @@
To be added.
To be added.
- __m256i _mm256_rolv_epi64 (__m256i a, __m256i b) VPROLQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_rolv_epi64 (__m256i a, __m256i b)
+ VPROLVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
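The rotate entries pair an immediate form (VPROLD/VPROLQ, one count for all lanes) with a variable form (VPROLVD/VPROLVQ, one count per lane). Sketch, assuming .NET 8+:

    if (Avx512F.VL.IsSupported)
    {
        Vector128<uint> v = Vector128.Create(0x80000001u, 2u, 3u, 4u);

        // VPROLD: every lane rotated left by the same immediate
        Vector128<uint> byImmediate = Avx512F.VL.RotateLeft(v, 8);

        // VPROLVD: each lane rotated by the matching lane of the count vector
        Vector128<uint> counts = Vector128.Create(1u, 2u, 3u, 4u);
        Vector128<uint> perLane = Avx512F.VL.RotateLeftVariable(v, counts);
    }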
@@ -4899,7 +5426,10 @@
To be added.
To be added.
- __m128i _mm_ror_epi32 (__m128i a, int imm8) VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
+ __m128i _mm_ror_epi32 (__m128i a, int imm8)
+ VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -4933,7 +5463,10 @@
To be added.
To be added.
- __m128i _mm_ror_epi64 (__m128i a, int imm8) VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
+ __m128i _mm_ror_epi64 (__m128i a, int imm8)
+ VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -4967,7 +5500,10 @@
To be added.
To be added.
- __m128i _mm_ror_epi32 (__m128i a, int imm8) VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
+ __m128i _mm_ror_epi32 (__m128i a, int imm8)
+ VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -5001,7 +5537,10 @@
To be added.
To be added.
- __m128i _mm_ror_epi64 (__m128i a, int imm8) VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
+ __m128i _mm_ror_epi64 (__m128i a, int imm8)
+ VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -5035,7 +5574,10 @@
To be added.
To be added.
- __m256i _mm256_ror_epi32 (__m256i a, int imm8) VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
+ __m256i _mm256_ror_epi32 (__m256i a, int imm8)
+ VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -5069,7 +5611,10 @@
To be added.
To be added.
- __m256i _mm256_ror_epi64 (__m256i a, int imm8) VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
+ __m256i _mm256_ror_epi64 (__m256i a, int imm8)
+ VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -5103,7 +5648,10 @@
To be added.
To be added.
- __m256i _mm256_ror_epi32 (__m256i a, int imm8) VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
+ __m256i _mm256_ror_epi32 (__m256i a, int imm8)
+ VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -5137,7 +5685,10 @@
To be added.
To be added.
- __m256i _mm256_ror_epi64 (__m256i a, int imm8) VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
+ __m256i _mm256_ror_epi64 (__m256i a, int imm8)
+ VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -5164,7 +5715,10 @@
To be added.
To be added.
- __m128i _mm_rorv_epi32 (__m128i a, __m128i b) VPRORDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
+ __m128i _mm_rorv_epi32 (__m128i a, __m128i b)
+ VPRORVD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -5191,7 +5745,10 @@
To be added.
To be added.
- __m128i _mm_rorv_epi64 (__m128i a, __m128i b) VPRORQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_rorv_epi64 (__m128i a, __m128i b)
+ VPRORVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -5218,7 +5775,10 @@
To be added.
To be added.
- __m128i _mm_rorv_epi32 (__m128i a, __m128i b) VPRORDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
+ __m128i _mm_rorv_epi32 (__m128i a, __m128i b)
+ VPRORVD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -5245,7 +5805,10 @@
To be added.
To be added.
- __m128i _mm_rorv_epi64 (__m128i a, __m128i b) VPRORQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_rorv_epi64 (__m128i a, __m128i b)
+ VPRORVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -5272,7 +5835,10 @@
To be added.
To be added.
- __m256i _mm256_rorv_epi32 (__m256i a, __m256i b) VPRORDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
+ __m256i _mm256_rorv_epi32 (__m256i a, __m256i b)
+ VPRORVD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -5299,7 +5865,10 @@
To be added.
To be added.
- __m256i _mm256_rorv_epi64 (__m256i a, __m256i b) VPRORQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_rorv_epi64 (__m256i a, __m256i b)
+ VPRORVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -5326,7 +5895,10 @@
To be added.
To be added.
- __m256i _mm256_rorv_epi32 (__m256i a, __m256i b) VPRORDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
+ __m256i _mm256_rorv_epi32 (__m256i a, __m256i b)
+ VPRORVD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -5353,7 +5925,10 @@
To be added.
To be added.
- __m256i _mm256_rorv_epi64 (__m256i a, __m256i b) VPRORQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_rorv_epi64 (__m256i a, __m256i b)
+ VPRORVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -5387,7 +5962,10 @@
To be added.
To be added.
- __m128d _mm_roundscale_pd (__m128d a, int imm) VRNDSCALEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
+ __m128d _mm_roundscale_pd (__m128d a, int imm)
+ VRNDSCALEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -5421,7 +5999,10 @@
To be added.
To be added.
- __m128 _mm_roundscale_ps (__m128 a, int imm) VRNDSCALEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
+ __m128 _mm_roundscale_ps (__m128 a, int imm)
+ VRNDSCALEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -5455,7 +6036,10 @@
To be added.
To be added.
- __m256d _mm256_roundscale_pd (__m256d a, int imm) VRNDSCALEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
+ __m256d _mm256_roundscale_pd (__m256d a, int imm)
+ VRNDSCALEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -5489,7 +6073,10 @@
To be added.
To be added.
- __m256 _mm256_roundscale_ps (__m256 a, int imm) VRNDSCALEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
+ __m256 _mm256_roundscale_ps (__m256 a, int imm)
+ VRNDSCALEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
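VRNDSCALE rounds each element to a multiple of 2^-M, where M is the high nibble of the immediate and the low bits pick the rounding mode, so it generalizes Floor/Ceiling/Round. Sketch, assuming .NET 8+:

    if (Avx512F.VL.IsSupported)
    {
        Vector128<double> x = Vector128.Create(1.30, -2.55);

        // imm8 = 0x00: round to the nearest whole number
        Vector128<double> whole = Avx512F.VL.RoundScale(x, 0x00);

        // imm8 = 0x20 (M = 2): round to the nearest multiple of 1/4
        Vector128<double> quarters = Avx512F.VL.RoundScale(x, 0x20);
    }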
@@ -5516,7 +6103,10 @@
To be added.
To be added.
- __m128d _mm_scalef_pd (__m128d a, int imm) VSCALEFPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128d _mm_scalef_pd (__m128d a, __m128d b)
+ VSCALEFPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -5543,7 +6133,10 @@
To be added.
To be added.
- __m128 _mm_scalef_ps (__m128 a, int imm) VSCALEFPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
+ __m128 _mm_scalef_ps (__m128 a, __m128 b)
+ VSCALEFPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -5570,7 +6163,10 @@
To be added.
To be added.
- __m256d _mm256_scalef_pd (__m256d a, int imm) VSCALEFPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256d _mm256_scalef_pd (__m256d a, __m256d b)
+ VSCALEFPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -5597,7 +6193,10 @@
To be added.
To be added.
- __m256 _mm256_scalef_ps (__m256 a, int imm) VSCALEFPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
+ __m256 _mm256_scalef_ps (__m256 a, __m256 b)
+ VSCALEFPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -5631,7 +6230,10 @@
To be added.
To be added.
- __128i _mm_srai_epi64 (__m128i a, int imm8) VPSRAQ xmm1 {k1}{z}, xmm2, imm8
+
+ __m128i _mm_srai_epi64 (__m128i a, int imm8)
+ VPSRAQ xmm1 {k1}{z}, xmm2, imm8
+
To be added.
To be added.
@@ -5658,7 +6260,10 @@
To be added.
To be added.
- __m128i _mm_sra_epi64 (__m128i a, __m128i count) VPSRAQ xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_sra_epi64 (__m128i a, __m128i count)
+ VPSRAQ xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -5692,7 +6297,10 @@
To be added.
To be added.
- __m256i _mm256_srai_epi64 (__m256i a, int imm8) VPSRAQ ymm1 {k1}{z}, ymm2, imm8
+
+ __m256i _mm256_srai_epi64 (__m256i a, int imm8)
+ VPSRAQ ymm1 {k1}{z}, ymm2, imm8
+
To be added.
To be added.
@@ -5719,7 +6327,10 @@
To be added.
To be added.
- __m256i _mm256_sra_epi64 (__m256i a, __m128i count) VPSRAQ ymm1 {k1}{z}, ymm2, xmm3/m128
+
+ __m256i _mm256_sra_epi64 (__m256i a, __m128i count)
+ VPSRAQ ymm1 {k1}{z}, ymm2, xmm3/m128
+
To be added.
To be added.
@@ -5746,7 +6357,10 @@
To be added.
To be added.
- __m128i _mm_srav_epi64 (__m128i a, __m128i count) VPSRAVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
+ __m128i _mm_srav_epi64 (__m128i a, __m128i count)
+ VPSRAVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -5773,7 +6387,10 @@
To be added.
To be added.
- __m256i _mm256_srav_epi64 (__m256i a, __m256i count) VPSRAVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
+ __m256i _mm256_srav_epi64 (__m256i a, __m256i count)
+ VPSRAVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
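VPSRAQ is likewise new with AVX-512: SSE2/AVX2 provide arithmetic right shifts only for 16- and 32-bit elements. Sketch, assuming .NET 8+:

    if (Avx512F.VL.IsSupported)
    {
        Vector128<long> v = Vector128.Create(-64L, 64L);

        // VPSRAQ, immediate count: sign bits shift in from the left
        Vector128<long> byImmediate = Avx512F.VL.ShiftRightArithmetic(v, 3); // (-8, 8)

        // VPSRAVQ: per-lane counts
        Vector128<long> perLane =
            Avx512F.VL.ShiftRightArithmeticVariable(v, Vector128.Create(1UL, 2UL));
    }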
@@ -5809,7 +6426,10 @@
To be added.
To be added.
To be added.
- __m256d _mm256_shuffle_f64x2 (__m256d a, __m256d b, const int imm8) VSHUFF64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
+ __m256d _mm256_shuffle_f64x2 (__m256d a, __m256d b, const int imm8)
+ VSHUFF64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -5845,7 +6465,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8) VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
+ __m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8)
+ VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -5881,7 +6504,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8) VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
+ __m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8)
+ VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -5917,7 +6543,10 @@
To be added.
To be added.
To be added.
- __m256 _mm256_shuffle_f32x4 (__m256 a, __m256 b, const int imm8) VSHUFF32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
+ __m256 _mm256_shuffle_f32x4 (__m256 a, __m256 b, const int imm8)
+ VSHUFF32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -5953,7 +6582,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8) VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
+ __m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8)
+ VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -5989,7 +6621,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8) VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
+ __m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8)
+ VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -6027,7 +6662,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6065,7 +6703,10 @@
To be added.
To be added.
To be added.
- __m128d _mm_ternarylogic_pd (__m128d a, __m128d b, __m128d c, int imm) VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m128d _mm_ternarylogic_pd (__m128d a, __m128d b, __m128d c, int imm)
+ VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6103,7 +6744,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6141,7 +6785,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
+ __m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -6179,7 +6826,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm) VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
+ __m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm)
+ VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -6217,7 +6867,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6255,7 +6908,10 @@
To be added.
To be added.
To be added.
- __m128 _mm_ternarylogic_ps (__m128 a, __m128 b, __m128 c, int imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m128 _mm_ternarylogic_ps (__m128 a, __m128 b, __m128 c, int imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6293,7 +6949,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6331,7 +6990,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm) VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
+ __m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -6369,7 +7031,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm) VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
+ __m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm)
+ VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -6407,7 +7072,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6445,7 +7113,10 @@
To be added.
To be added.
To be added.
- __m256d _mm256_ternarylogic_pd (__m256d a, __m256d b, __m256d c, int imm) VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m256d _mm256_ternarylogic_pd (__m256d a, __m256d b, __m256d c, int imm)
+ VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6483,7 +7154,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6521,7 +7195,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
+ __m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -6559,7 +7236,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm) VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
+ __m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm)
+ VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -6597,7 +7277,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6635,7 +7318,10 @@
To be added.
To be added.
To be added.
- __m256 _mm256_ternarylogic_ps (__m256 a, __m256 b, __m256 c, int imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m256 _mm256_ternarylogic_ps (__m256 a, __m256 b, __m256 c, int imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6673,7 +7359,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -6711,7 +7400,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm) VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
+ __m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -6749,7 +7441,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm) VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
+ __m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm)
+ VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
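The VPTERNLOG* immediate is an 8-entry truth table indexed by the bit triple (a, b, c), which is why one control byte can express any three-input bitwise function; 0xCA, for instance, is the table for the bitwise select a ? b : c. A minimal C# sketch, assuming the .NET 8 Avx512F.VL surface for the Vector256 overloads documented above:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    // One VPTERNLOGD replaces the usual three-instruction select:
    // result = (mask & ifTrue) | (~mask & ifFalse).
    static Vector256<int> Select(Vector256<int> mask, Vector256<int> ifTrue, Vector256<int> ifFalse)
        => Avx512F.VL.TernaryLogic(mask, ifTrue, ifFalse, 0xCA);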
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512F+X64.xml b/xml/System.Runtime.Intrinsics.X86/Avx512F+X64.xml
index 5dd23640cd2..2026c832a22 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512F+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512F+X64.xml
@@ -40,7 +40,10 @@
To be added.
To be added.
- __m128d _mm_cvtsi64_sd (__m128d a, __int64 b) VCVTUSI2SD xmm1, xmm2, r/m64 This intrinsic is only available on 64-bit processes
+
+ __m128d _mm_cvtu64_sd (__m128d a, unsigned __int64 b)
+ VCVTUSI2SD xmm1, xmm2, r/m64
+ This intrinsic is only available on 64-bit processes.
+
To be added.
To be added.
@@ -67,7 +70,10 @@
To be added.
To be added.
- __m128 _mm_cvtsi64_ss (__m128 a, __int64 b) VCVTUSI2SS xmm1, xmm2, r/m64 This intrinsic is only available on 64-bit processes
+
+ __m128 _mm_cvtu64_ss (__m128 a, unsigned __int64 b)
+ VCVTUSI2SS xmm1, xmm2, r/m64
+ This intrinsic is only available on 64-bit processes.
+
To be added.
To be added.
@@ -92,7 +98,10 @@
To be added.
- unsigned __int64 _mm_cvtsd_u64 (__m128d a) VCVTSD2USI r64, xmm1/m64{er} This intrinsic is only available on 64-bit processes
+
+ unsigned __int64 _mm_cvtsd_u64 (__m128d a)
+ VCVTSD2USI r64, xmm1/m64{er}
+ This intrinsic is only available on 64-bit processes.
+
To be added.
To be added.
@@ -117,7 +126,10 @@
To be added.
- unsigned __int64 _mm_cvtss_u64 (__m128 a) VCVTSS2USI r64, xmm1/m32{er} This intrinsic is only available on 64-bit processes
+
+ unsigned __int64 _mm_cvtss_u64 (__m128 a)
+ VCVTSS2USI r64, xmm1/m32{er}
+ This intrinsic is only available on 64-bit processes.
+
To be added.
To be added.
@@ -142,7 +154,10 @@
To be added.
- unsigned __int64 _mm_cvttsd_u64 (__m128d a) VCVTTSD2USI r64, xmm1/m64{er} This intrinsic is only available on 64-bit processes
+
+ unsigned __int64 _mm_cvttsd_u64 (__m128d a)
+ VCVTTSD2USI r64, xmm1/m64{sae}
+ This intrinsic is only available on 64-bit processes.
+
To be added.
To be added.
@@ -167,7 +182,10 @@
To be added.
- unsigned __int64 _mm_cvttss_u64 (__m128 a) VCVTTSS2USI r64, xmm1/m32{er} This intrinsic is only available on 64-bit processes
+
+ unsigned __int64 _mm_cvttss_u64 (__m128 a)
+ VCVTTSS2USI r64, xmm1/m32{sae}
+ This intrinsic is only available on 64-bit processes.
+
To be added.
To be added.
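The entries above are the unsigned scalar conversions that require a 64-bit general-purpose register, hence the "64-bit processes" note. A hedged usage sketch, assuming the .NET 8 Avx512F.X64 surface:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    Vector128<double> v = Vector128.Create(3.7, 0.0);
    if (Avx512F.X64.IsSupported)
    {
        ulong rounded   = Avx512F.X64.ConvertToUInt64(v);                // VCVTSD2USI  -> 4
        ulong truncated = Avx512F.X64.ConvertToUInt64WithTruncation(v);  // VCVTTSD2USI -> 3
    }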
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512F.xml b/xml/System.Runtime.Intrinsics.X86/Avx512F.xml
index a070f21d882..7e57081b933 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512F.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512F.xml
@@ -44,7 +44,10 @@
To be added.
- __m512i _mm512_abs_epi32 (__m512i a) VPABSD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
+ __m512i _mm512_abs_epi32 (__m512i a)
+ VPABSD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -69,7 +72,10 @@
To be added.
- __m512i _mm512_abs_epi64 (__m512i a) VPABSQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m512i _mm512_abs_epi64 (__m512i a)
+ VPABSQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -96,7 +102,10 @@
To be added.
To be added.
- __m512d _mm512_add_pd (__m512d a, __m512d b) VADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}
+
+ __m512d _mm512_add_pd (__m512d a, __m512d b)
+ VADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}
+
To be added.
To be added.
@@ -123,7 +132,10 @@
To be added.
To be added.
- __m512i _mm512_add_epi32 (__m512i a, __m512i b) VPADDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_add_epi32 (__m512i a, __m512i b)
+ VPADDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -150,7 +162,10 @@
To be added.
To be added.
- __m512i _mm512_add_epi64 (__m512i a, __m512i b) VPADDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_add_epi64 (__m512i a, __m512i b)
+ VPADDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -177,7 +192,10 @@
To be added.
To be added.
- __m512 _mm512_add_ps (__m512 a, __m512 b) VADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}
+
+ __m512 _mm512_add_ps (__m512 a, __m512 b)
+ VADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}
+
To be added.
To be added.
@@ -204,7 +222,10 @@
To be added.
To be added.
- __m512i _mm512_add_epi32 (__m512i a, __m512i b) VPADDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_add_epi32 (__m512i a, __m512i b)
+ VPADDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -231,7 +252,10 @@
To be added.
To be added.
- __m512i _mm512_add_epi64 (__m512i a, __m512i b) VPADDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_add_epi64 (__m512i a, __m512i b)
+ VPADDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
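Each Add entry maps one managed overload onto the instruction matching its element width (VPADDD, VPADDQ, VADDPS, VADDPD). A minimal sketch, assuming the .NET 8 Avx512F surface:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    Vector512<long> a = Vector512.Create(1L);
    Vector512<long> b = Vector512.Create(2L);
    Vector512<long> sum = Avx512F.Add(a, b);  // VPADDQ: 3 in all eight lanes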
@@ -267,7 +291,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_alignr_epi32 (__m512i a, __m512i b, const int count) VALIGND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
+ __m512i _mm512_alignr_epi32 (__m512i a, __m512i b, const int count)
+ VALIGND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -303,7 +330,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_alignr_epi32 (__m512i a, __m512i b, const int count) VALIGND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
+ __m512i _mm512_alignr_epi32 (__m512i a, __m512i b, const int count)
+ VALIGND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -339,7 +369,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_alignr_epi64 (__m512i a, __m512i b, const int count) VALIGNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
+ __m512i _mm512_alignr_epi64 (__m512i a, __m512i b, const int count)
+ VALIGNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -375,7 +408,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_alignr_epi64 (__m512i a, __m512i b, const int count) VALIGNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
+ __m512i _mm512_alignr_epi64 (__m512i a, __m512i b, const int count)
+ VALIGNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
To be added.
To be added.
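VALIGND/VALIGNQ treat the two sources as one double-width vector and extract a full vector starting at the immediate element offset. A sketch, assuming .NET 8's Avx512F.AlignRight32:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    Vector512<int> lo = Vector512.Create(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    Vector512<int> hi = Vector512.Create(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
    // {hi:lo} shifted right by 4 int32 lanes -> 4, 5, ..., 19.
    Vector512<int> rotated = Avx512F.AlignRight32(hi, lo, 4);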
@@ -402,7 +438,10 @@
To be added.
To be added.
- __m512i _mm512_and_si512 (__m512i a, __m512i b) VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_and_si512 (__m512i a, __m512i b)
+ VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -429,7 +468,10 @@
To be added.
To be added.
- __m512i _mm512_and_si512 (__m512i a, __m512i b) VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_and_si512 (__m512i a, __m512i b)
+ VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -456,7 +498,10 @@
To be added.
To be added.
- __m512i _mm512_and_epi32 (__m512i a, __m512i b) VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_and_epi32 (__m512i a, __m512i b)
+ VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -483,7 +528,10 @@
To be added.
To be added.
- __m512i _mm512_and_epi64 (__m512i a, __m512i b) VPANDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_and_epi64 (__m512i a, __m512i b)
+ VPANDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -510,7 +558,10 @@
To be added.
To be added.
- __m512i _mm512_and_si512 (__m512i a, __m512i b) VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_and_si512 (__m512i a, __m512i b)
+ VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -537,7 +588,10 @@
To be added.
To be added.
- __m512i _mm512_and_si512 (__m512i a, __m512i b) VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_and_si512 (__m512i a, __m512i b)
+ VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -564,7 +618,10 @@
To be added.
To be added.
- __m512i _mm512_and_epi32 (__m512i a, __m512i b) VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_and_epi32 (__m512i a, __m512i b)
+ VPANDD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -591,7 +648,10 @@
To be added.
To be added.
- __m512i _mm512_and_epi64 (__m512i a, __m512i b) VPANDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_and_epi64 (__m512i a, __m512i b)
+ VPANDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -618,7 +678,10 @@
To be added.
To be added.
- __m512i _mm512_andnot_si512 (__m512i a, __m512i b) VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_andnot_si512 (__m512i a, __m512i b)
+ VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -645,7 +708,10 @@
To be added.
To be added.
- __m512i _mm512_andnot_si512 (__m512i a, __m512i b) VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_andnot_si512 (__m512i a, __m512i b)
+ VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -672,7 +738,10 @@
To be added.
To be added.
- __m512i _mm512_andnot_epi32 (__m512i a, __m512i b) VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_andnot_epi32 (__m512i a, __m512i b)
+ VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -699,7 +768,10 @@
To be added.
To be added.
- __m512i _mm512_andnot_epi64 (__m512i a, __m512i b) VPANDNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_andnot_epi64 (__m512i a, __m512i b)
+ VPANDNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -726,7 +798,10 @@
To be added.
To be added.
- __m512i _mm512_andnot_si512 (__m512i a, __m512i b) VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_andnot_si512 (__m512i a, __m512i b)
+ VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -753,7 +828,10 @@
To be added.
To be added.
- __m512i _mm512_andnot_si512 (__m512i a, __m512i b) VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_andnot_si512 (__m512i a, __m512i b)
+ VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -780,7 +858,10 @@
To be added.
To be added.
- __m512i _mm512_andnot_epi32 (__m512i a, __m512i b) VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_andnot_epi32 (__m512i a, __m512i b)
+ VPANDND zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -807,7 +888,10 @@
To be added.
To be added.
- __m512i _mm512_andnot_epi64 (__m512i a, __m512i b) VPANDNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_andnot_epi64 (__m512i a, __m512i b)
+ VPANDNQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
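Note the operand order implied by VPANDND/VPANDNQ: the first source is the one that gets complemented. A sketch, assuming the .NET 8 Avx512F surface:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    Vector512<uint> bits = Vector512.Create(0b1010u);
    Vector512<uint> mask = Vector512.Create(0b0110u);
    Vector512<uint> both    = Avx512F.And(bits, mask);     // VPANDD  -> 0b0010 per lane
    Vector512<uint> cleared = Avx512F.AndNot(mask, bits);  // VPANDND -> (~mask) & bits = 0b1000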
@@ -836,7 +920,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_blendv_pd (__m512d a, __m512d b, __m512d mask) VBLENDMPD zmm1 {k1}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_blendv_pd (__m512d a, __m512d b, __m512d mask)
+ VBLENDMPD zmm1 {k1}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -865,7 +952,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_blendv_epi32 (__m512i a, __m512i b, __m512i mask) VPBLENDMD zmm1 {k1}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_blendv_epi32 (__m512i a, __m512i b, __m512i mask)
+ VPBLENDMD zmm1 {k1}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -894,7 +984,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_blendv_epi64 (__m512i a, __m512i b, __m512i mask) VPBLENDMQ zmm1 {k1}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_blendv_epi64 (__m512i a, __m512i b, __m512i mask)
+ VPBLENDMQ zmm1 {k1}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -923,7 +1016,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_blendv_ps (__m512 a, __m512 b, __m512 mask) VBLENDMPS zmm1 {k1}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_blendv_ps (__m512 a, __m512 b, __m512 mask)
+ VBLENDMPS zmm1 {k1}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -952,7 +1048,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_blendv_epu32 (__m512i a, __m512i b, __m512i mask) VPBLENDMD zmm1 {k1}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_blendv_epu32 (__m512i a, __m512i b, __m512i mask)
+ VPBLENDMD zmm1 {k1}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -981,7 +1080,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_blendv_epu64 (__m512i a, __m512i b, __m512i mask) VPBLENDMQ zmm1 {k1}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_blendv_epu64 (__m512i a, __m512i b, __m512i mask)
+ VPBLENDMQ zmm1 {k1}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
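The VBLENDM*/VPBLENDM* entries are per-lane selects driven by a mask vector. A hedged sketch, assuming the .NET 8 Avx512F surface, where comparison results (all-ones lanes) make natural masks:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    Vector512<int> x = Vector512.Create(5);
    Vector512<int> y = Vector512.Create(9);
    Vector512<int> gt  = Avx512F.CompareGreaterThan(y, x);  // all-ones lanes where y > x
    Vector512<int> max = Avx512F.BlendVariable(x, y, gt);   // VPBLENDMD: 9 in every lane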
@@ -1006,7 +1108,10 @@
To be added.
- __m512d _mm512_broadcastsd_pd (__m128d a) VBROADCASTSD zmm1 {k1}{z}, xmm2/m64
+
+ __m512d _mm512_broadcastsd_pd (__m128d a)
+ VBROADCASTSD zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -1031,7 +1136,10 @@
To be added.
- __m512i _mm512_broadcastd_epi32 (__m128i a) VPBROADCASTD zmm1 {k1}{z}, xmm2/m32
+
+ __m512i _mm512_broadcastd_epi32 (__m128i a)
+ VPBROADCASTD zmm1 {k1}{z}, xmm2/m32
+
To be added.
To be added.
@@ -1056,7 +1164,10 @@
To be added.
- __m512i _mm512_broadcastq_epi64 (__m128i a) VPBROADCASTQ zmm1 {k1}{z}, xmm2/m64
+
+ __m512i _mm512_broadcastq_epi64 (__m128i a)
+ VPBROADCASTQ zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -1081,7 +1192,10 @@
To be added.
- __m512 _mm512_broadcastss_ps (__m128 a) VBROADCASTSS zmm1 {k1}{z}, xmm2/m32
+
+ __m512 _mm512_broadcastss_ps (__m128 a)
+ VBROADCASTSS zmm1 {k1}{z}, xmm2/m32
+
To be added.
To be added.
@@ -1106,7 +1220,10 @@
To be added.
- __m512i _mm512_broadcastd_epi32 (__m128i a) VPBROADCASTD zmm1 {k1}{z}, xmm2/m32
+
+ __m512i _mm512_broadcastd_epi32 (__m128i a)
+ VPBROADCASTD zmm1 {k1}{z}, xmm2/m32
+
To be added.
To be added.
@@ -1131,7 +1248,10 @@
To be added.
- __m512i _mm512_broadcastq_epi64 (__m128i a) VPBROADCASTQ zmm1 {k1}{z}, xmm2/m64
+
+ __m512i _mm512_broadcastq_epi64 (__m128i a)
+ VPBROADCASTQ zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -1155,7 +1275,10 @@
To be added.
- __m512i _mm512_broadcast_i32x4 (__m128i const * mem_addr) VBROADCASTI32x4 zmm1 {k1}{z}, m128
+
+ __m512i _mm512_broadcast_i32x4 (__m128i const * mem_addr)
+ VBROADCASTI32x4 zmm1 {k1}{z}, m128
+
To be added.
To be added.
@@ -1179,7 +1302,10 @@
To be added.
- __m512 _mm512_broadcast_f32x4 (__m128 const * mem_addr) VBROADCASTF32x4 zmm1 {k1}{z}, m128
+
+ __m512 _mm512_broadcast_f32x4 (__m128 const * mem_addr)
+ VBROADCASTF32x4 zmm1 {k1}{z}, m128
+
To be added.
To be added.
@@ -1203,7 +1329,10 @@
To be added.
- __m512i _mm512_broadcast_i32x4 (__m128i const * mem_addr) VBROADCASTI32x4 zmm1 {k1}{z}, m128
+
+ __m512i _mm512_broadcast_i32x4 (__m128i const * mem_addr)
+ VBROADCASTI32x4 zmm1 {k1}{z}, m128
+
To be added.
To be added.
@@ -1227,7 +1356,10 @@
To be added.
- __m512d _mm512_broadcast_f64x4 (__m256d const * mem_addr) VBROADCASTF64x4 zmm1 {k1}{z}, m256
+
+ __m512d _mm512_broadcast_f64x4 (__m256d const * mem_addr)
+ VBROADCASTF64x4 zmm1 {k1}{z}, m256
+
To be added.
To be added.
@@ -1251,7 +1383,10 @@
To be added.
- __m512i _mm512_broadcast_i64x4 (__m256i const * mem_addr) VBROADCASTI64x4 zmm1 {k1}{z}, m256
+
+ __m512i _mm512_broadcast_i64x4 (__m256i const * mem_addr)
+ VBROADCASTI64x4 zmm1 {k1}{z}, m256
+
To be added.
To be added.
@@ -1275,7 +1410,10 @@
To be added.
- __m512i _mm512_broadcast_i64x4 (__m256i const * mem_addr) VBROADCASTI64x4 zmm1 {k1}{z}, m256
+
+ __m512i _mm512_broadcast_i64x4 (__m256i const * mem_addr)
+ VBROADCASTI64x4 zmm1 {k1}{z}, m256
+
To be added.
To be added.
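The broadcast entries splat either a scalar lane (VBROADCASTSS/SD, VPBROADCASTD/Q) or a 128/256-bit block from memory across the full zmm register. A minimal sketch, assuming the .NET 8 Avx512F surface:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    Vector128<float> s = Vector128.CreateScalar(2.5f);
    Vector512<float> splat = Avx512F.BroadcastScalarToVector512(s);  // VBROADCASTSS: 2.5 in all 16 lanes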
@@ -1311,7 +1449,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_cmp_pd (__m512d a, __m512d b, const int imm8) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8
+
+ __m512d _mm512_cmp_pd (__m512d a, __m512d b, const int imm8)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8
+
To be added.
To be added.
@@ -1347,7 +1488,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_cmp_ps (__m512 a, __m512 b, const int imm8) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8
+
+ __m512 _mm512_cmp_ps (__m512 a, __m512 b, const int imm8)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8
+
To be added.
To be added.
@@ -1374,7 +1518,10 @@
To be added.
To be added.
- __m512d _mm512_cmpeq_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(0) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmpeq_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(0)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -1401,7 +1548,10 @@
To be added.
To be added.
- __m512i _mm512_cmpeq_epi32 (__m512i a, __m512i b) VPCMPEQD k1 {k2}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_cmpeq_epi32 (__m512i a, __m512i b)
+ VPCMPEQD k1 {k2}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -1428,7 +1578,10 @@
To be added.
To be added.
- __m512i _mm512_cmpeq_epi64 (__m512i a, __m512i b) VPCMPEQQ k1 {k2}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_cmpeq_epi64 (__m512i a, __m512i b)
+ VPCMPEQQ k1 {k2}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1455,7 +1608,10 @@
To be added.
To be added.
- __m512 _mm512_cmpeq_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(0) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmpeq_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(0)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -1482,7 +1638,10 @@
To be added.
To be added.
- __m512i _mm512_cmpeq_epu32 (__m512i a, __m512i b) VPCMPEQD k1 {k2}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_cmpeq_epu32 (__m512i a, __m512i b)
+ VPCMPEQD k1 {k2}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -1509,7 +1668,10 @@
To be added.
To be added.
- __m512i _mm512_cmpeq_epu64 (__m512i a, __m512i b) VPCMPEQQ k1 {k2}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_cmpeq_epu64 (__m512i a, __m512i b)
+ VPCMPEQQ k1 {k2}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1536,7 +1698,10 @@
To be added.
To be added.
- __m512d _mm512_cmpgt_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(14) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmpgt_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(14)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -1563,7 +1728,10 @@
To be added.
To be added.
- __m512i _mm512_cmpgt_epi32 (__m512i a, __m512i b) VPCMPGTD k1 {k2}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_cmpgt_epi32 (__m512i a, __m512i b)
+ VPCMPGTD k1 {k2}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -1590,7 +1758,10 @@
To be added.
To be added.
- __m512i _mm512_cmpgt_epi64 (__m512i a, __m512i b) VPCMPGTQ k1 {k2}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_cmpgt_epi64 (__m512i a, __m512i b)
+ VPCMPGTQ k1 {k2}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1617,7 +1788,10 @@
To be added.
To be added.
- __m512 _mm512_cmpgt_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(14) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmpgt_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(14)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -1644,7 +1818,10 @@
To be added.
To be added.
- __m512i _mm512_cmpgt_epu32 (__m512i a, __m512i b) VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(6)
+
+ __m512i _mm512_cmpgt_epu32 (__m512i a, __m512i b)
+ VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(6)
+
To be added.
To be added.
@@ -1671,7 +1848,10 @@
To be added.
To be added.
- __m512i _mm512_cmpgt_epu64 (__m512i a, __m512i b) VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(6)
+
+ __m512i _mm512_cmpgt_epu64 (__m512i a, __m512i b)
+ VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(6)
+
To be added.
To be added.
@@ -1698,7 +1878,10 @@
To be added.
To be added.
- __m512d _mm512_cmpge_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(13) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmpge_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(13)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -1725,7 +1908,10 @@
To be added.
To be added.
- __m512i _mm512_cmpge_epi32 (__m512i a, __m512i b) VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(5)
+
+ __m512i _mm512_cmpge_epi32 (__m512i a, __m512i b)
+ VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(5)
+
To be added.
To be added.
@@ -1752,7 +1938,10 @@
To be added.
To be added.
- __m512i _mm512_cmpge_epi64 (__m512i a, __m512i b) VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(5)
+
+ __m512i _mm512_cmpge_epi64 (__m512i a, __m512i b)
+ VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(5)
+
To be added.
To be added.
@@ -1779,7 +1968,10 @@
To be added.
To be added.
- __m512 _mm512_cmpge_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(13) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmpge_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(13)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -1806,7 +1998,10 @@
To be added.
To be added.
- __m512i _mm512_cmpge_epu32 (__m512i a, __m512i b) VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(5)
+
+ __m512i _mm512_cmpge_epu32 (__m512i a, __m512i b)
+ VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(5)
+
To be added.
To be added.
@@ -1833,7 +2028,10 @@
To be added.
To be added.
- __m512i _mm512_cmpge_epu64 (__m512i a, __m512i b) VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(5)
+
+ __m512i _mm512_cmpge_epu64 (__m512i a, __m512i b)
+ VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(5)
+
To be added.
To be added.
@@ -1860,7 +2058,10 @@
To be added.
To be added.
- __m512d _mm512_cmplt_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(1) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmplt_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(1)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -1887,7 +2088,10 @@
To be added.
To be added.
- __m512i _mm512_cmplt_epi32 (__m512i a, __m512i b) VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(1)
+
+ __m512i _mm512_cmplt_epi32 (__m512i a, __m512i b)
+ VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(1)
+
To be added.
To be added.
@@ -1914,7 +2118,10 @@
To be added.
To be added.
- __m512i _mm512_cmplt_epi64 (__m512i a, __m512i b) VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(1)
+
+ __m512i _mm512_cmplt_epi64 (__m512i a, __m512i b)
+ VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(1)
+
To be added.
To be added.
@@ -1941,7 +2148,10 @@
To be added.
To be added.
- __m512 _mm512_cmplt_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(1) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmplt_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(1)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -1968,7 +2178,10 @@
To be added.
To be added.
- __m512i _mm512_cmplt_epu32 (__m512i a, __m512i b) VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(1)
+
+ __m512i _mm512_cmplt_epu32 (__m512i a, __m512i b)
+ VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(1)
+
To be added.
To be added.
@@ -1995,7 +2208,10 @@
To be added.
To be added.
- __m512i _mm512_cmplt_epu64 (__m512i a, __m512i b) VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(1)
+
+ __m512i _mm512_cmplt_epu64 (__m512i a, __m512i b)
+ VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(1)
+
To be added.
To be added.
@@ -2022,7 +2238,10 @@
To be added.
To be added.
- __m512d _mm512_cmple_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(2) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmple_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(2)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2049,7 +2268,10 @@
To be added.
To be added.
- __m512i _mm512_cmple_epi32 (__m512i a, __m512i b) VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(2)
+
+ __m512i _mm512_cmple_epi32 (__m512i a, __m512i b)
+ VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(2)
+
To be added.
To be added.
@@ -2076,7 +2298,10 @@
To be added.
To be added.
- __m512i _mm512_cmple_epi64 (__m512i a, __m512i b) VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(2)
+
+ __m512i _mm512_cmple_epi64 (__m512i a, __m512i b)
+ VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(2)
+
To be added.
To be added.
@@ -2103,7 +2328,10 @@
To be added.
To be added.
- __m512 _mm512_cmple_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(2) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmple_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(2)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2130,7 +2358,10 @@
To be added.
To be added.
- __m512i _mm512_cmple_epu32 (__m512i a, __m512i b) VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(2)
+
+ __m512i _mm512_cmple_epu32 (__m512i a, __m512i b)
+ VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(2)
+
To be added.
To be added.
@@ -2157,7 +2388,10 @@
To be added.
To be added.
- __m512i _mm512_cmple_epu64 (__m512i a, __m512i b) VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(2)
+
+ __m512i _mm512_cmple_epu64 (__m512i a, __m512i b)
+ VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(2)
+
To be added.
To be added.
@@ -2184,7 +2418,10 @@
To be added.
To be added.
- __m512d _mm512_cmpneq_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(4) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmpneq_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(4)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2211,7 +2448,10 @@
To be added.
To be added.
- __m512i _mm512_cmpne_epi32 (__m512i a, __m512i b) VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(4)
+
+ __m512i _mm512_cmpne_epi32 (__m512i a, __m512i b)
+ VPCMPD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(4)
+
To be added.
To be added.
@@ -2238,7 +2478,10 @@
To be added.
To be added.
- __m512i _mm512_cmpne_epi64 (__m512i a, __m512i b) VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(4)
+
+ __m512i _mm512_cmpne_epi64 (__m512i a, __m512i b)
+ VPCMPQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(4)
+
To be added.
To be added.
@@ -2265,7 +2508,10 @@
To be added.
To be added.
- __m512 _mm512_cmpneq_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(4) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmpneq_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(4)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2292,7 +2538,10 @@
To be added.
To be added.
- __m512i _mm512_cmpne_epu32 (__m512i a, __m512i b) VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(4)
+
+ __m512i _mm512_cmpne_epu32 (__m512i a, __m512i b)
+ VPCMPUD k1 {k2}, zmm2, zmm3/m512/m32bcst, imm8(4)
+
To be added.
To be added.
@@ -2319,7 +2568,10 @@
To be added.
To be added.
- __m512i _mm512_cmpne_epu64 (__m512i a, __m512i b) VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(4)
+
+ __m512i _mm512_cmpne_epu64 (__m512i a, __m512i b)
+ VPCMPUQ k1 {k2}, zmm2, zmm3/m512/m64bcst, imm8(4)
+
To be added.
To be added.
@@ -2346,7 +2598,10 @@
To be added.
To be added.
- __m512d _mm512_cmpngt_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(10) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmpngt_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(10)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2373,7 +2628,10 @@
To be added.
To be added.
- __m512 _mm512_cmpngt_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(10) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmpngt_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(10)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2400,7 +2658,10 @@
To be added.
To be added.
- __m512d _mm512_cmpnge_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(9) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmpnge_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(9)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2427,7 +2688,10 @@
To be added.
To be added.
- __m512 _mm512_cmpnge_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(9) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmpnge_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(9)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2454,7 +2718,10 @@
To be added.
To be added.
- __m512d _mm512_cmpnlt_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(5) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmpnlt_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(5)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2481,7 +2748,10 @@
To be added.
To be added.
- __m512 _mm512_cmpnlt_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(5) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmpnlt_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(5)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2508,7 +2778,10 @@
To be added.
To be added.
- __m512d _mm512_cmpnle_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(6) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmpnle_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(6)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2535,7 +2808,10 @@
To be added.
To be added.
- __m512 _mm512_cmpnle_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(6) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmpnle_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(6)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2562,7 +2838,10 @@
To be added.
To be added.
- __m512d _mm512_cmpord_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(7) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmpord_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(7)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2589,7 +2868,10 @@
To be added.
To be added.
- __m512 _mm512_cmpord_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(7) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmpord_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(7)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2616,7 +2898,10 @@
To be added.
To be added.
- __m512d _mm512_cmpunord_pd (__m512d a, __m512d b) VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(3) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512d _mm512_cmpunord_pd (__m512d a, __m512d b)
+ VCMPPD k1 {k2}, zmm2, zmm3/m512/m64bcst{sae}, imm8(3)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
@@ -2643,7 +2928,10 @@
To be added.
To be added.
- __m512 _mm512_cmpunord_ps (__m512 a, __m512 b) VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(3) The above native signature does not exist. We provide this additional overload for completeness.
+
+ __m512 _mm512_cmpunord_ps (__m512 a, __m512 b)
+ VCMPPS k1 {k2}, zmm2, zmm3/m512/m32bcst{sae}, imm8(3)
+ The above native signature does not exist. We provide this additional overload for completeness.
+
To be added.
To be added.
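The imm8 values in the compare entries above are the standard AVX-512 predicate encodings (0 EQ, 1 LT, 2 LE, 4 NE, 5 NLT/GE, 6 NLE/GT, and so on); the managed surface wraps each predicate as a named method that materializes the k-mask as all-ones lanes. A sketch, assuming the .NET 8 Avx512F surface:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    Vector512<uint> a = Vector512.Create(3u);
    Vector512<uint> b = Vector512.Create(7u);
    Vector512<uint> lt = Avx512F.CompareLessThan(a, b);  // VPCMPUD ..., imm8(1): all-ones lanes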
@@ -2670,7 +2958,10 @@
To be added.
To be added.
- __m128d _mm_cvtsi32_sd (__m128d a, int b) VCVTUSI2SD xmm1, xmm2, r/m32
+
+ __m128d _mm_cvtu32_sd (__m128d a, unsigned int b)
+ VCVTUSI2SD xmm1, xmm2, r/m32
+
To be added.
To be added.
@@ -2697,7 +2988,10 @@
To be added.
To be added.
- __m128 _mm_cvtsi32_ss (__m128 a, int b) VCVTUSI2SS xmm1, xmm2, r/m32
+
+ __m128 _mm_cvtu32_ss (__m128 a, unsigned int b)
+ VCVTUSI2SS xmm1, xmm2, r/m32
+
To be added.
To be added.
@@ -2722,7 +3016,10 @@
To be added.
- unsigned int _mm_cvtsd_u32 (__m128d a) VCVTSD2USI r32, xmm1/m64{er}
+
+ unsigned int _mm_cvtsd_u32 (__m128d a)
+ VCVTSD2USI r32, xmm1/m64{er}
+
To be added.
To be added.
@@ -2747,7 +3044,10 @@
To be added.
- unsigned int _mm_cvtss_u32 (__m128 a) VCVTSS2USI r32, xmm1/m32{er}
+
+ unsigned int _mm_cvtss_u32 (__m128 a)
+ VCVTSS2USI r32, xmm1/m32{er}
+
To be added.
To be added.
@@ -2772,7 +3072,10 @@
To be added.
- unsigned int _mm_cvttsd_u32 (__m128d a) VCVTTSD2USI r32, xmm1/m64{er}
+
+ unsigned int _mm_cvttsd_u32 (__m128d a)
+ VCVTTSD2USI r32, xmm1/m64{sae}
+
To be added.
To be added.
@@ -2797,7 +3100,10 @@
To be added.
- unsigned int _mm_cvttss_u32 (__m128 a) VCVTTSS2USI r32, xmm1/m32{er}
+
+ unsigned int _mm_cvttss_u32 (__m128 a)
+ VCVTTSS2USI r32, xmm1/m32{sae}
+
To be added.
To be added.
@@ -2822,7 +3128,10 @@
To be added.
- __m128i _mm512_cvtepi32_epi8 (__m512i a) VPMOVDB xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi32_epi8 (__m512i a)
+ VPMOVDB xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -2847,7 +3156,10 @@
To be added.
- __m128i _mm512_cvtepi64_epi8 (__m512i a) VPMOVQB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi64_epi8 (__m512i a)
+ VPMOVQB xmm1/m64 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -2872,7 +3184,10 @@
To be added.
- __m128i _mm512_cvtepi32_epi8 (__m512i a) VPMOVDB xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi32_epi8 (__m512i a)
+ VPMOVDB xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -2897,7 +3212,10 @@
To be added.
- __m128i _mm512_cvtepi64_epi8 (__m512i a) VPMOVQB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi64_epi8 (__m512i a)
+ VPMOVQB xmm1/m64 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -2922,7 +3240,10 @@
To be added.
- __m128i _mm512_cvtusepi32_epi8 (__m512i a) VPMOVUSDB xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtusepi32_epi8 (__m512i a)
+ VPMOVUSDB xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -2947,7 +3268,10 @@
To be added.
- __m128i _mm512_cvtusepi64_epi8 (__m512i a) VPMOVUSQB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtusepi64_epi8 (__m512i a)
+ VPMOVUSQB xmm1/m64 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -2972,7 +3296,10 @@
To be added.
- __m128i _mm512_cvtepi64_epi16 (__m512i a) VPMOVQW xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi64_epi16 (__m512i a)
+ VPMOVQW xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -2997,7 +3324,10 @@
To be added.
- __m128i _mm512_cvtepi64_epi16 (__m512i a) VPMOVQW xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi64_epi16 (__m512i a)
+ VPMOVQW xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3022,7 +3352,10 @@
To be added.
- __m128i _mm512_cvtsepi64_epi16 (__m512i a) VPMOVSQW xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtsepi64_epi16 (__m512i a)
+ VPMOVSQW xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3047,7 +3380,10 @@
To be added.
- __m128i _mm512_cvtepi32_epi8 (__m512i a) VPMOVDB xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi32_epi8 (__m512i a)
+ VPMOVDB xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3072,7 +3408,10 @@
To be added.
- __m128i _mm512_cvtepi64_epi8 (__m512i a) VPMOVQB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi64_epi8 (__m512i a)
+ VPMOVQB xmm1/m64 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3097,7 +3436,10 @@
To be added.
- __m128i _mm512_cvtepi32_epi8 (__m512i a) VPMOVDB xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi32_epi8 (__m512i a)
+ VPMOVDB xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3122,7 +3464,10 @@
To be added.
- __m128i _mm512_cvtepi64_epi8 (__m512i a) VPMOVQB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi64_epi8 (__m512i a)
+ VPMOVQB xmm1/m64 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3147,7 +3492,10 @@
To be added.
- __m128i _mm512_cvtsepi32_epi8 (__m512i a) VPMOVSDB xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtsepi32_epi8 (__m512i a)
+ VPMOVSDB xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3172,7 +3520,10 @@
To be added.
- __m128i _mm512_cvtsepi64_epi8 (__m512i a) VPMOVSQB xmm1/m64 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtsepi64_epi8 (__m512i a)
+ VPMOVSQB xmm1/m64 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3197,7 +3548,10 @@
To be added.
- __m128i _mm512_cvtepi64_epi16 (__m512i a) VPMOVQW xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi64_epi16 (__m512i a)
+ VPMOVQW xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3222,7 +3576,10 @@
To be added.
- __m128i _mm512_cvtepi64_epi16 (__m512i a) VPMOVQW xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtepi64_epi16 (__m512i a)
+ VPMOVQW xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3247,7 +3604,10 @@
To be added.
- __m128i _mm512_cvtusepi64_epi16 (__m512i a) VPMOVUSQW xmm1/m128 {k1}{z}, zmm2
+
+ __m128i _mm512_cvtusepi64_epi16 (__m512i a)
+ VPMOVUSQW xmm1/m128 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3272,7 +3632,10 @@
To be added.
- __m256i _mm512_cvtepi32_epi16 (__m512i a) VPMOVDW ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi32_epi16 (__m512i a)
+ VPMOVDW ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3297,7 +3660,10 @@
To be added.
- __m256i _mm512_cvtepi32_epi16 (__m512i a) VPMOVDW ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi32_epi16 (__m512i a)
+ VPMOVDW ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3322,7 +3688,10 @@
To be added.
- __m256i _mm512_cvtsepi32_epi16 (__m512i a) VPMOVSDW ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtsepi32_epi16 (__m512i a)
+ VPMOVSDW ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3347,7 +3716,10 @@
To be added.
- __m256i _mm512_cvtpd_epi32 (__m512d a) VCVTPD2DQ ymm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
+ __m256i _mm512_cvtpd_epi32 (__m512d a)
+ VCVTPD2DQ ymm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -3372,7 +3744,10 @@
To be added.
- __m256i _mm512_cvtepi64_epi32 (__m512i a) VPMOVQD ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi64_epi32 (__m512i a)
+ VPMOVQD ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3397,7 +3772,10 @@
To be added.
- __m256i _mm512_cvtepi64_epi32 (__m512i a) VPMOVQD ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi64_epi32 (__m512i a)
+ VPMOVQD ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3422,7 +3800,10 @@
To be added.
- __m256i _mm512_cvtsepi64_epi32 (__m512i a) VPMOVSQD ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtsepi64_epi32 (__m512i a)
+ VPMOVSQD ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3447,7 +3828,10 @@
To be added.
- __m256i _mm512_cvttpd_epi32 (__m512d a) VCVTTPD2DQ ymm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
+ __m256i _mm512_cvttpd_epi32 (__m512d a)
+ VCVTTPD2DQ ymm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
To be added.
To be added.
@@ -3472,7 +3856,10 @@
To be added.
- __m256 _mm512_cvtpd_ps (__m512d a) VCVTPD2PS ymm1, zmm2/m512 VCVTPD2PS ymm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
+ __m256 _mm512_cvtpd_ps (__m512d a)
+ VCVTPD2PS ymm1, zmm2/m512
+ VCVTPD2PS ymm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -3497,7 +3884,10 @@
To be added.
- __m256i _mm512_cvtepi32_epi16 (__m512i a) VPMOVDW ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi32_epi16 (__m512i a)
+ VPMOVDW ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3522,7 +3912,10 @@
To be added.
- __m256i _mm512_cvtepi32_epi16 (__m512i a) VPMOVDW ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi32_epi16 (__m512i a)
+ VPMOVDW ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3547,7 +3940,10 @@
To be added.
- __m256i _mm512_cvtusepi32_epi16 (__m512i a) VPMOVUSDW ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtusepi32_epi16 (__m512i a)
+ VPMOVUSDW ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3572,7 +3968,10 @@
To be added.
- __m256i _mm512_cvtpd_epu32 (__m512d a) VCVTPD2UDQ ymm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
+ __m256i _mm512_cvtpd_epu32 (__m512d a)
+ VCVTPD2UDQ ymm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -3597,7 +3996,10 @@
To be added.
- __m256i _mm512_cvtepi64_epi32 (__m512i a) VPMOVQD ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi64_epi32 (__m512i a)
+ VPMOVQD ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3622,7 +4024,10 @@
To be added.
- __m256i _mm512_cvtepi64_epi32 (__m512i a) VPMOVQD ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtepi64_epi32 (__m512i a)
+ VPMOVQD ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3647,7 +4052,10 @@
To be added.
- __m256i _mm512_cvtusepi64_epi32 (__m512i a) VPMOVUSQD ymm1/m256 {k1}{z}, zmm2
+
+ __m256i _mm512_cvtusepi64_epi32 (__m512i a)
+ VPMOVUSQD ymm1/m256 {k1}{z}, zmm2
+
To be added.
To be added.
@@ -3672,7 +4080,10 @@
To be added.
- __m256i _mm512_cvttpd_epu32 (__m512d a) VCVTTPD2UDQ ymm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
+ __m256i _mm512_cvttpd_epu32 (__m512d a)
+ VCVTTPD2UDQ ymm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -3697,7 +4108,10 @@
To be added.
- __m512d _mm512_cvtepi32_pd (__m256i a) VCVTDQ2PD zmm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m512d _mm512_cvtepi32_pd (__m256i a)
+ VCVTDQ2PD zmm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -3722,7 +4136,10 @@
To be added.
- __m512d _mm512_cvtps_pd (__m256 a) VCVTPS2PD zmm1 {k1}{z}, ymm2/m256/m32bcst{sae}
+
+ __m512d _mm512_cvtps_pd (__m256 a)
+ VCVTPS2PD zmm1 {k1}{z}, ymm2/m256/m32bcst{sae}
+
To be added.
To be added.
@@ -3747,7 +4164,10 @@
To be added.
- __m512d _mm512_cvtepu32_pd (__m256i a) VCVTUDQ2PD zmm1 {k1}{z}, ymm2/m256/m32bcst
+
+ __m512d _mm512_cvtepu32_pd (__m256i a)
+ VCVTUDQ2PD zmm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -3772,7 +4192,10 @@
To be added.
- __m512i _mm512_cvtepu8_epi32 (__m128i a) VPMOVZXBD zmm1 {k1}{z}, xmm2/m128
+
+ __m512i _mm512_cvtepu8_epi32 (__m128i a)
+ VPMOVZXBD zmm1 {k1}{z}, xmm2/m128
+
To be added.
To be added.
@@ -3797,7 +4220,10 @@
To be added.
- __m512i _mm512_cvtepi8_epi32 (__m128i a) VPMOVSXBD zmm1 {k1}{z}, xmm2/m128
+
+ __m512i _mm512_cvtepi8_epi32 (__m128i a)
+ VPMOVSXBD zmm1 {k1}{z}, xmm2/m128
+
To be added.
To be added.
@@ -3822,7 +4248,10 @@
To be added.
- __m512i _mm512_cvtepi16_epi32 (__m128i a) VPMOVSXWD zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepi16_epi32 (__m128i a)
+ VPMOVSXWD zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -3847,7 +4276,10 @@
To be added.
- __m512i _mm512_cvtepu16_epi32 (__m128i a) VPMOVZXWD zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepu16_epi32 (__m256i a)
+ VPMOVZXWD zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -3872,7 +4304,10 @@
To be added.
- __m512i _mm512_cvtps_epi32 (__m512 a) VCVTPS2DQ zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
+ __m512i _mm512_cvtps_epi32 (__m512 a)
+ VCVTPS2DQ zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
To be added.
To be added.
@@ -3897,7 +4332,10 @@
To be added.
- __m512i _mm512_cvttps_epi32 (__m512 a) VCVTTPS2DQ zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}
+
+ __m512i _mm512_cvttps_epi32 (__m512 a)
+ VCVTTPS2DQ zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}
+
To be added.
To be added.
@@ -3922,7 +4360,10 @@
To be added.
- __m512i _mm512_cvtepu8_epi64 (__m128i a) VPMOVZXBQ zmm1 {k1}{z}, xmm2/m64
+
+ __m512i _mm512_cvtepu8_epi64 (__m128i a)
+ VPMOVZXBQ zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -3947,7 +4388,10 @@
To be added.
- __m512i _mm512_cvtepi16_epi64 (__m128i a) VPMOVSXWQ zmm1 {k1}{z}, xmm2/m128
+
+ __m512i _mm512_cvtepi16_epi64 (__m128i a)
+ VPMOVSXWQ zmm1 {k1}{z}, xmm2/m128
+
To be added.
To be added.
@@ -3972,7 +4416,10 @@
To be added.
- __m512i _mm512_cvtepi8_epi64 (__m128i a) VPMOVSXBQ zmm1 {k1}{z}, xmm2/m64
+
+ __m512i _mm512_cvtepi8_epi64 (__m128i a)
+ VPMOVSXBQ zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -3997,7 +4444,10 @@
To be added.
- __m512i _mm512_cvtepu16_epi64 (__m128i a) VPMOVZXWQ zmm1 {k1}{z}, xmm2/m128
+
+ __m512i _mm512_cvtepu16_epi64 (__m128i a)
+ VPMOVZXWQ zmm1 {k1}{z}, xmm2/m128
+
To be added.
To be added.
@@ -4022,7 +4472,10 @@
To be added.
- __m512i _mm512_cvtepi32_epi64 (__m128i a) VPMOVSXDQ zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepi32_epi64 (__m256i a)
+ VPMOVSXDQ zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -4047,7 +4500,10 @@
To be added.
- __m512i _mm512_cvtepu32_epi64 (__m128i a) VPMOVZXDQ zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepu32_epi64 (__m256i a)
+ VPMOVZXDQ zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -4072,7 +4528,10 @@
To be added.
- __m512 _mm512_cvtepi32_ps (__m512i a) VCVTDQ2PS zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
+ __m512 _mm512_cvtepi32_ps (__m512i a)
+ VCVTDQ2PS zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
To be added.
To be added.
@@ -4097,7 +4556,10 @@
To be added.
- __m512 _mm512_cvtepu32_ps (__m512i a) VCVTUDQ2PS zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
+ __m512 _mm512_cvtepu32_ps (__m512i a)
+ VCVTUDQ2PS zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
To be added.
To be added.
@@ -4122,7 +4584,10 @@
To be added.
- __m512i _mm512_cvtepu8_epi32 (__m128i a) VPMOVZXBD zmm1 {k1}{z}, xmm2/m128
+
+ __m512i _mm512_cvtepu8_epi32 (__m128i a)
+ VPMOVZXBD zmm1 {k1}{z}, xmm2/m128
+
To be added.
To be added.
@@ -4147,7 +4612,10 @@
To be added.
- __m512i _mm512_cvtepi8_epi32 (__m128i a) VPMOVSXBD zmm1 {k1}{z}, xmm2/m128
+
+ __m512i _mm512_cvtepi8_epi32 (__m128i a)
+ VPMOVSXBD zmm1 {k1}{z}, xmm2/m128
+
To be added.
To be added.
@@ -4172,7 +4640,10 @@
To be added.
- __m512i _mm512_cvtepi16_epi32 (__m128i a) VPMOVSXWD zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepi16_epi32 (__m256i a)
+ VPMOVSXWD zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -4197,7 +4668,10 @@
To be added.
- __m512i _mm512_cvtepu16_epi32 (__m128i a) VPMOVZXWD zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepu16_epi32 (__m256i a)
+ VPMOVZXWD zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -4222,7 +4696,10 @@
To be added.
- __m512i _mm512_cvtps_epu32 (__m512 a) VCVTPS2UDQ zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
+ __m512i _mm512_cvtps_epu32 (__m512 a)
+ VCVTPS2UDQ zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
To be added.
To be added.
@@ -4247,7 +4724,10 @@
To be added.
- __m512i _mm512_cvttps_epu32 (__m512 a) VCVTTPS2UDQ zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
+ __m512i _mm512_cvttps_epu32 (__m512 a)
+ VCVTTPS2UDQ zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}
+
To be added.
To be added.
@@ -4272,7 +4752,10 @@
To be added.
- __m512i _mm512_cvtepu8_epi64 (__m128i a) VPMOVZXBQ zmm1 {k1}{z}, xmm2/m64
+
+ __m512i _mm512_cvtepu8_epi64 (__m128i a)
+ VPMOVZXBQ zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -4297,7 +4780,10 @@
To be added.
- __m512i _mm512_cvtepi16_epi64 (__m128i a) VPMOVSXWQ zmm1 {k1}{z}, xmm2/m128
+
+ __m512i _mm512_cvtepi16_epi64 (__m128i a)
+ VPMOVSXWQ zmm1 {k1}{z}, xmm2/m128
+
To be added.
To be added.
@@ -4322,7 +4808,10 @@
To be added.
- __m512i _mm512_cvtepi8_epi64 (__m128i a) VPMOVSXBQ zmm1 {k1}{z}, xmm2/m64
+
+ __m512i _mm512_cvtepi8_epi64 (__m128i a)
+ VPMOVSXBQ zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -4347,7 +4836,10 @@
To be added.
- __m512i _mm512_cvtepu16_epi64 (__m128i a) VPMOVZXWQ zmm1 {k1}{z}, xmm2/m128
+
+ __m512i _mm512_cvtepu16_epi64 (__m128i a)
+ VPMOVZXWQ zmm1 {k1}{z}, xmm2/m128
+
To be added.
To be added.
@@ -4372,7 +4864,10 @@
To be added.
- __m512i _mm512_cvtepi32_epi64 (__m128i a) VPMOVSXDQ zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepi32_epi64 (__m256i a)
+ VPMOVSXDQ zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
@@ -4397,7 +4892,10 @@
To be added.
- __m512i _mm512_cvtepu32_epi64 (__m128i a) VPMOVZXDQ zmm1 {k1}{z}, ymm2/m256
+
+ __m512i _mm512_cvtepu32_epi64 (__m256i a)
+ VPMOVZXDQ zmm1 {k1}{z}, ymm2/m256
+
To be added.
To be added.
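The conversion entries pair each widening VPMOVZX*/VPMOVSX* form with a narrowing VPMOV*/VPMOVS*/VPMOVUS* counterpart (plain truncation, signed saturation, unsigned saturation). A round-trip sketch, assuming the .NET 8 Avx512F surface:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    Vector128<byte> bytes = Vector128.Create((byte)200);
    Vector512<int>  wide  = Avx512F.ConvertToVector512Int32(bytes);  // VPMOVZXBD: 200 per lane
    Vector128<byte> back  = Avx512F.ConvertToVector128Byte(wide);    // VPMOVDB: truncates back to 200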
@@ -4424,7 +4922,10 @@
To be added.
To be added.
- __m512d _mm512_div_pd (__m512d a, __m512d b) VDIVPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}
+
+ __m512d _mm512_div_pd (__m512d a, __m512d b)
+ VDIVPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}
+
To be added.
To be added.
@@ -4451,7 +4952,10 @@
To be added.
To be added.
- __m512 _mm512_div_ps (__m512 a, __m512 b) VDIVPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}
+
+ __m512 _mm512_div_ps (__m512 a, __m512 b)
+ VDIVPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}
+
To be added.
To be added.
@@ -4476,7 +4980,10 @@
To be added.
- __m512d _mm512_movedup_pd (__m512d a) VMOVDDUP zmm1 {k1}{z}, zmm2/m512
+
+ __m512d _mm512_movedup_pd (__m512d a)
+ VMOVDDUP zmm1 {k1}{z}, zmm2/m512
+
To be added.
To be added.
@@ -4501,7 +5008,10 @@
To be added.
- __m512 _mm512_moveldup_ps (__m512 a) VMOVSLDUP zmm1 {k1}{z}, zmm2/m512
+
+ __m512 _mm512_moveldup_ps (__m512 a)
+ VMOVSLDUP zmm1 {k1}{z}, zmm2/m512
+
To be added.
To be added.
@@ -4526,7 +5036,10 @@
To be added.
- __m512 _mm512_movehdup_ps (__m512 a) VMOVSHDUP zmm1 {k1}{z}, zmm2/m512
+
+ __m512 _mm512_movehdup_ps (__m512 a)
+ VMOVSHDUP zmm1 {k1}{z}, zmm2/m512
+
To be added.
To be added.
@@ -4560,7 +5073,10 @@
To be added.
To be added.
- __m128i _mm512_extracti128_si512 (__m512i a, const int imm8) VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128i _mm512_extracti128_si512 (__m512i a, const int imm8)
+ VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4594,7 +5110,10 @@
To be added.
To be added.
- __m128d _mm512_extractf128_pd (__m512d a, const int imm8) VEXTRACTF32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128d _mm512_extractf128_pd (__m512d a, const int imm8)
+ VEXTRACTF32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4628,7 +5147,10 @@
To be added.
To be added.
- __m128i _mm512_extracti128_si512 (__m512i a, const int imm8) VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128i _mm512_extracti128_si512 (__m512i a, const int imm8)
+ VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4662,7 +5184,10 @@
To be added.
To be added.
- __m128i _mm512_extracti32x4_epi32 (__m512i a, const int imm8) VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128i _mm512_extracti32x4_epi32 (__m512i a, const int imm8)
+ VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4696,7 +5221,10 @@
To be added.
To be added.
- __m128i _mm512_extracti128_si512 (__m512i a, const int imm8) VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128i _mm512_extracti128_si512 (__m512i a, const int imm8)
+ VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4730,7 +5258,10 @@
To be added.
To be added.
- __m128i _mm512_extracti128_si512 (__m512i a, const int imm8) VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128i _mm512_extracti128_si512 (__m512i a, const int imm8)
+ VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4764,7 +5295,10 @@
To be added.
To be added.
- __m128 _mm512_extractf32x4_ps (__m512 a, const int imm8) VEXTRACTF32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128 _mm512_extractf32x4_ps (__m512 a, const int imm8)
+ VEXTRACTF32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4798,7 +5332,10 @@
To be added.
To be added.
- __m128i _mm512_extracti128_si512 (__m512i a, const int imm8) VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128i _mm512_extracti128_si512 (__m512i a, const int imm8)
+ VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4832,7 +5369,10 @@
To be added.
To be added.
- __m128i _mm512_extracti32x4_epi32 (__m512i a, const int imm8) VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128i _mm512_extracti32x4_epi32 (__m512i a, const int imm8)
+ VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4866,7 +5406,10 @@
To be added.
To be added.
- __m128i _mm512_extracti128_si512 (__m512i a, const int imm8) VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
+ __m128i _mm512_extracti128_si512 (__m512i a, const int imm8)
+ VEXTRACTI32x4 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4900,7 +5443,10 @@
To be added.
To be added.
- __m256i _mm512_extracti256_si512 (__m512i a, const int imm8) VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256i _mm512_extracti256_si512 (__m512i a, const int imm8)
+ VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4934,7 +5480,10 @@
To be added.
To be added.
- __m256d _mm512_extractf64x4_pd (__m512d a, const int imm8) VEXTRACTF64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256d _mm512_extractf64x4_pd (__m512d a, const int imm8)
+ VEXTRACTF64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -4968,7 +5517,10 @@
To be added.
To be added.
- __m256i _mm512_extracti256_si512 (__m512i a, const int imm8) VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256i _mm512_extracti256_si512 (__m512i a, const int imm8)
+ VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -5002,7 +5554,10 @@
To be added.
To be added.
- __m256i _mm512_extracti256_si512 (__m512i a, const int imm8) VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256i _mm512_extracti256_si512 (__m512i a, const int imm8)
+ VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -5036,7 +5591,10 @@
To be added.
To be added.
- __m256i _mm512_extracti64x4_epi64 (__m512i a, const int imm8) VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256i _mm512_extracti64x4_epi64 (__m512i a, const int imm8)
+ VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -5070,7 +5628,10 @@
To be added.
To be added.
- __m256i _mm512_extracti256_si512 (__m512i a, const int imm8) VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256i _mm512_extracti256_si512 (__m512i a, const int imm8)
+ VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -5104,7 +5665,10 @@
To be added.
To be added.
- __m256 _mm512_extractf256_ps (__m512 a, const int imm8) VEXTRACTF64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256 _mm512_extractf256_ps (__m512 a, const int imm8)
+ VEXTRACTF64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -5138,7 +5702,10 @@
To be added.
To be added.
- __m256i _mm512_extracti256_si512 (__m512i a, const int imm8) VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256i _mm512_extracti256_si512 (__m512i a, const int imm8)
+ VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -5172,7 +5739,10 @@
To be added.
To be added.
- __m256i _mm512_extracti256_si512 (__m512i a, const int imm8) VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256i _mm512_extracti256_si512 (__m512i a, const int imm8)
+ VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -5206,7 +5776,10 @@
To be added.
To be added.
- __m256i _mm512_extracti64x4_epi64 (__m512i a, const int imm8) VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
+ __m256i _mm512_extracti64x4_epi64 (__m512i a, const int imm8)
+ VEXTRACTI64x4 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
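In the extract entries, the immediate selects which 128-bit (VEXTRACTI32x4) or 256-bit (VEXTRACTI64x4) slice of the zmm register to pull out. A sketch, assuming the .NET 8 Avx512F surface:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    Vector512<int> v = Vector512.Create(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    Vector128<int> thirdQuarter = Avx512F.ExtractVector128(v, 2);  // VEXTRACTI32x4: lanes 8..11
    Vector256<int> upperHalf    = Avx512F.ExtractVector256(v, 1);  // VEXTRACTI64x4: lanes 8..15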
@@ -5244,7 +5817,9 @@
To be added.
To be added.
To be added.
- __m512d _mm512_fixupimm_pd(__m512d a, __m512d b, __m512i tbl, int imm); VFIXUPIMMPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}, imm8
+
+ __m512d _mm512_fixupimm_pd(__m512d a, __m512d b, __m512i tbl, int imm);
+ VFIXUPIMMPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}, imm8
+
To be added.
To be added.
@@ -5282,7 +5857,9 @@
To be added.
To be added.
To be added.
- __m512 _mm512_fixupimm_ps(__m512 a, __m512 b, __m512i tbl, int imm); VFIXUPIMMPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}, imm8
+
+ __m512 _mm512_fixupimm_ps(__m512 a, __m512 b, __m512i tbl, int imm);
+ VFIXUPIMMPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}, imm8
+
To be added.
To be added.
@@ -5320,7 +5897,9 @@
To be added.
To be added.
To be added.
- __m128d _mm_fixupimm_sd(__m128d a, __m128d b, __m128i tbl, int imm); VFIXUPIMMSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
+ __m128d _mm_fixupimm_sd(__m128d a, __m128d b, __m128i tbl, int imm);
+ VFIXUPIMMSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
To be added.
To be added.
@@ -5358,7 +5937,9 @@
To be added.
To be added.
To be added.
- __m128 _mm_fixupimm_ss(__m128 a, __m128 b, __m128i tbl, int imm); VFIXUPIMMSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
+ __m128 _mm_fixupimm_ss(__m128 a, __m128 b, __m128i tbl, int imm);
+ VFIXUPIMMSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
To be added.
To be added.
@@ -5387,7 +5968,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_fmadd_pd (__m512d a, __m512d b, __m512d c) VFMADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_fmadd_pd (__m512d a, __m512d b, __m512d c)
+ VFMADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -5416,7 +6000,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_fmadd_ps (__m512 a, __m512 b, __m512 c) VFMADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_fmadd_ps (__m512 a, __m512 b, __m512 c)
+ VFMADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -5445,7 +6032,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_fnmadd_pd (__m512d a, __m512d b, __m512d c) VFNMADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_fnmadd_pd (__m512d a, __m512d b, __m512d c)
+ VFNMADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -5474,7 +6064,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_fnmadd_ps (__m512 a, __m512 b, __m512 c) VFNMADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_fnmadd_ps (__m512 a, __m512 b, __m512 c)
+ VFNMADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -5503,7 +6096,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_fmaddsub_pd (__m512d a, __m512d b, __m512d c) VFMADDSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512d _mm512_fmaddsub_pd (__m512d a, __m512d b, __m512d c)
+ VFMADDSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -5532,7 +6128,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_fmaddsub_ps (__m512 a, __m512 b, __m512 c) VFMADDSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_fmaddsub_ps (__m512 a, __m512 b, __m512 c)
+ VFMADDSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -5561,7 +6160,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_fmsub_pd (__m512d a, __m512d b, __m512d c) VFMSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_fmsub_pd (__m512d a, __m512d b, __m512d c)
+ VFMSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -5590,7 +6192,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_fmsub_ps (__m512 a, __m512 b, __m512 c) VFMSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_fmsub_ps (__m512 a, __m512 b, __m512 c)
+ VFMSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -5619,7 +6224,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_fmsubadd_pd (__m512d a, __m512d b, __m512d c) VFMSUBADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_fmsubadd_pd (__m512d a, __m512d b, __m512d c)
+ VFMSUBADDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -5648,7 +6256,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_fmsubadd_ps (__m512 a, __m512 b, __m512 c) VFMSUBADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_fmsubadd_ps (__m512 a, __m512 b, __m512 c)
+ VFMSUBADDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -5677,7 +6288,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_fnmsub_pd (__m512d a, __m512d b, __m512d c) VFNMSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_fnmsub_pd (__m512d a, __m512d b, __m512d c)
+ VFNMSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -5706,7 +6320,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_fnmsub_ps (__m512 a, __m512 b, __m512 c) VFNMSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_fnmsub_ps (__m512 a, __m512 b, __m512 c)
+ VFNMSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
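The fused entries above all share one shape: a single instruction computes ±(a*b) ± c with one rounding. A hedged C sketch (assumes AVX-512F; the function names are illustrative):

#include <immintrin.h>

/* a*x + y, fused: one rounding instead of two. */
static __m512 axpy(__m512 a, __m512 x, __m512 y)
{
    return _mm512_fmadd_ps(a, x, y);
}

/* c - a*b, the FNMADD form often used for residuals. */
static __m512d residual(__m512d a, __m512d b, __m512d c)
{
    return _mm512_fnmadd_pd(a, b, c);
}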
@@ -5731,7 +6348,10 @@
To be added.
- __m512d _mm512_getexp_pd (__m512d a) VGETEXPPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
+ __m512d _mm512_getexp_pd (__m512d a)
+ VGETEXPPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
To be added.
To be added.
@@ -5756,7 +6376,10 @@
To be added.
- __m512 _mm512_getexp_ps (__m512 a) VGETEXPPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}
+
+ __m512 _mm512_getexp_ps (__m512 a)
+ VGETEXPPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}
+
To be added.
To be added.
@@ -5781,7 +6404,10 @@
To be added.
- __m128d _mm_getexp_sd (__m128d a) VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}
+
+ __m128d _mm_getexp_sd (__m128d a)
+ VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}
+
To be added.
To be added.
@@ -5806,7 +6432,10 @@
To be added.
- __m128 _mm_getexp_ss (__m128 a) VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
+
+ __m128 _mm_getexp_ss (__m128 a)
+ VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
+
To be added.
To be added.
@@ -5833,7 +6462,10 @@
To be added.
To be added.
- __m128d _mm_getexp_sd (__m128d a, __m128d b) VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128d _mm_getexp_sd (__m128d a, __m128d b)
+ VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -5860,7 +6492,10 @@
To be added.
To be added.
- __m128 _mm_getexp_ss (__m128 a, __m128 b) VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128 _mm_getexp_ss (__m128 a, __m128 b)
+ VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -5894,7 +6529,10 @@
To be added.
To be added.
- __m512d _mm512_getmant_pd (__m512d a) VGETMANTPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
+ __m512d _mm512_getmant_pd (__m512d a)
+ VGETMANTPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
To be added.
To be added.
@@ -5928,7 +6566,10 @@
To be added.
To be added.
- __m512 _mm512_getmant_ps (__m512 a) VGETMANTPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}
+
+ __m512 _mm512_getmant_ps (__m512 a)
+ VGETMANTPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}
+
To be added.
To be added.
@@ -5962,7 +6603,10 @@
To be added.
To be added.
- __m128d _mm_getmant_sd (__m128d a) VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}
+
+ __m128d _mm_getmant_sd (__m128d a)
+ VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}
+
To be added.
To be added.
@@ -5996,7 +6640,10 @@
To be added.
To be added.
- __m128 _mm_getmant_ss (__m128 a) VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
+
+ __m128 _mm_getmant_ss (__m128 a)
+ VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
+
To be added.
To be added.
@@ -6032,7 +6679,10 @@
To be added.
To be added.
To be added.
- __m128d _mm_getmant_sd (__m128d a, __m128d b) VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128d _mm_getmant_sd (__m128d a, __m128d b)
+ VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -6068,7 +6718,10 @@
To be added.
To be added.
To be added.
- __m128 _mm_getmant_ss (__m128 a, __m128 b) VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae} The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128 _mm_getmant_ss (__m128 a, __m128 b)
+ VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
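GetExponent and GetMantissa together act as a vector frexp: x reconstructs as mant * 2^exp. A sketch under that reading (assumes AVX-512F; note the native _mm512_getmant_pd takes explicit normalization/sign controls that the one-argument doc signature above elides):

#include <immintrin.h>

static void frexp8(__m512d x, __m512d *mant, __m512d *exp2)
{
    *exp2 = _mm512_getexp_pd(x);   /* floor(log2(|x|)), as doubles  */
    *mant = _mm512_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
                                   /* mantissa normalized to [1, 2) */
}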
@@ -6104,7 +6757,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8) VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)
+ VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -6140,7 +6796,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_insertf128_pd (__m512d a, __m128d b, int imm8) VINSERTF32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512d _mm512_insertf128_pd (__m512d a, __m128d b, int imm8)
+ VINSERTF32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -6176,7 +6835,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8) VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)
+ VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -6212,7 +6874,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti32x4_epi32 (__m512i a, __m128i b, const int imm8) VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512i _mm512_inserti32x4_epi32 (__m512i a, __m128i b, const int imm8)
+ VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -6248,7 +6913,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8) VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)
+ VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -6284,7 +6952,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8) VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)
+ VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -6320,7 +6991,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_insertf32x4_ps (__m512 a, __m128 b, int imm8) VINSERTF32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512 _mm512_insertf32x4_ps (__m512 a, __m128 b, int imm8)
+ VINSERTF32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -6356,7 +7030,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8) VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)
+ VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -6392,7 +7069,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti32x4_epi32 (__m512i a, __m128i b, const int imm8) VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512i _mm512_inserti32x4_epi32 (__m512i a, __m128i b, const int imm8)
+ VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -6428,7 +7108,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8) VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
+ __m512i _mm512_inserti128_si512 (__m512i a, __m128i b, const int imm8)
+ VINSERTI32x4 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -6464,7 +7147,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8) VINSERTI64x4 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)
+ VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -6500,7 +7186,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_insertf64x4_pd (__m512d a, __m256d b, int imm8) VINSERTF64x4 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512d _mm512_insertf64x4_pd (__m512d a, __m256d b, int imm8)
+ VINSERTF64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -6536,7 +7225,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8) VINSERTI64x4 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)
+ VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -6572,7 +7264,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8) VINSERTI64x4 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)
+ VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -6608,7 +7303,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti64x4_epi64 (__m512i a, __m256i b, const int imm8) VINSERTI64x4 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512i _mm512_inserti64x4_epi64 (__m512i a, __m256i b, const int imm8)
+ VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -6644,7 +7342,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8) VINSERTI64x4 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)
+ VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -6680,7 +7381,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_insertf256_ps (__m512 a, __m256 b, int imm8) VINSERTF64x4 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512 _mm512_insertf256_ps (__m512 a, __m256 b, int imm8)
+ VINSERTF64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -6716,7 +7420,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8) VINSERTI64x4 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)
+ VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -6752,7 +7459,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8) VINSERTI64x4 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512i _mm512_inserti256_si512 (__m512i a, __m256i b, const int imm8)
+ VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -6788,7 +7498,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_inserti64x4_epi64 (__m512i a, __m256i b, const int imm8) VINSERTI64x4 zmm1 {k1}{z}, zmm2, xmm3/m256, imm8
+
+ __m512i _mm512_inserti64x4_epi64 (__m512i a, __m256i b, const int imm8)
+ VINSERTI64x4 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
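The insert family is the inverse of extract: it overwrites one aligned 128- or 256-bit lane and leaves the rest of the zmm intact. A sketch (assumes AVX-512F; immintrin.h spells the intrinsic _mm512_inserti32x4, slightly differently from the doc string):

#include <immintrin.h>

/* Replace 32-bit elements 8..11 (128-bit lane index 2). */
static __m512i patch_lane2(__m512i v, __m128i lane)
{
    return _mm512_inserti32x4(v, lane, 2);
}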
@@ -6833,7 +7546,10 @@
To be added.
- __m512i _mm512_load_si512 (__m512i const * mem_addr) VMOVDQA32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_load_si512 (__m512i const * mem_addr)
+ VMOVDQA32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -6857,7 +7573,10 @@
To be added.
- __m512d _mm512_load_pd (double const * mem_addr) VMOVAPD zmm1 {k1}{z}, m512
+
+ __m512d _mm512_load_pd (double const * mem_addr)
+ VMOVAPD zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -6881,7 +7600,10 @@
To be added.
- __m512i _mm512_load_si512 (__m512i const * mem_addr) VMOVDQA32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_load_si512 (__m512i const * mem_addr)
+ VMOVDQA32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -6905,7 +7627,10 @@
To be added.
- __m512i _mm512_load_epi32 (__m512i const * mem_addr) VMOVDQA32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_load_epi32 (__m512i const * mem_addr)
+ VMOVDQA32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -6929,7 +7654,10 @@
To be added.
- __m512i _mm512_load_epi64 (__m512i const * mem_addr) VMOVDQA64 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_load_epi64 (__m512i const * mem_addr)
+ VMOVDQA64 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -6953,7 +7681,10 @@
To be added.
- __m512i _mm512_load_si512 (__m512i const * mem_addr) VMOVDQA32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_load_si512 (__m512i const * mem_addr)
+ VMOVDQA32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -6977,7 +7708,10 @@
To be added.
- __m512 _mm512_load_ps (float const * mem_addr) VMOVAPS zmm1 {k1}{z}, m512
+
+ __m512 _mm512_load_ps (float const * mem_addr)
+ VMOVAPS zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7001,7 +7735,10 @@
To be added.
- __m512i _mm512_load_si512 (__m512i const * mem_addr) VMOVDQA32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_load_si512 (__m512i const * mem_addr)
+ VMOVDQA32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7025,7 +7762,10 @@
To be added.
- __m512i _mm512_load_epi32 (__m512i const * mem_addr) VMOVDQA32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_load_epi32 (__m512i const * mem_addr)
+ VMOVDQA32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7049,7 +7789,10 @@
To be added.
- __m512i _mm512_load_epi64 (__m512i const * mem_addr) VMOVDQA64 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_load_epi64 (__m512i const * mem_addr)
+ VMOVDQA64 zmm1 {k1}{z}, m512
+
To be added.
To be added.
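LoadAlignedVector512 maps to the VMOVDQA32/VMOVAPD forms, which fault on addresses that are not 64-byte aligned. A sketch (assumes AVX-512F and C11 alignas):

#include <immintrin.h>
#include <stdalign.h>

alignas(64) static int data[16] = { 0, 1, 2,  3,  4,  5,  6,  7,
                                    8, 9, 10, 11, 12, 13, 14, 15 };

static __m512i load_all(void)
{
    return _mm512_load_si512(data);   /* ok: data is 64-byte aligned */
}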
@@ -7073,7 +7816,10 @@
To be added.
- __m512i _mm512_stream_load_si512 (__m512i const* mem_addr) VMOVNTDQA zmm1, m512
+
+ __m512i _mm512_stream_load_si512 (__m512i const* mem_addr)
+ VMOVNTDQA zmm1, m512
+
To be added.
To be added.
@@ -7097,7 +7843,10 @@
To be added.
- __m512i _mm512_stream_load_si512 (__m512i const* mem_addr) VMOVNTDQA zmm1, m512
+
+ __m512i _mm512_stream_load_si512 (__m512i const* mem_addr)
+ VMOVNTDQA zmm1, m512
+
To be added.
To be added.
@@ -7121,7 +7870,10 @@
To be added.
- __m512i _mm512_stream_load_si512 (__m512i const* mem_addr) VMOVNTDQA zmm1, m512
+
+ __m512i _mm512_stream_load_si512 (__m512i const* mem_addr)
+ VMOVNTDQA zmm1, m512
+
To be added.
To be added.
@@ -7145,7 +7897,10 @@
To be added.
- __m512i _mm512_stream_load_si512 (__m512i const* mem_addr) VMOVNTDQA zmm1, m512
+
+ __m512i _mm512_stream_load_si512 (__m512i const* mem_addr)
+ VMOVNTDQA zmm1, m512
+
To be added.
To be added.
@@ -7169,7 +7924,10 @@
To be added.
- __m512i _mm512_stream_load_si512 (__m512i const* mem_addr) VMOVNTDQA zmm1, m512
+
+ __m512i _mm512_stream_load_si512 (__m512i const* mem_addr)
+ VMOVNTDQA zmm1, m512
+
To be added.
To be added.
@@ -7193,7 +7951,10 @@
To be added.
- __m512i _mm512_stream_load_si512 (__m512i const* mem_addr) VMOVNTDQA zmm1, m512
+
+ __m512i _mm512_stream_load_si512 (__m512i const* mem_addr)
+ VMOVNTDQA zmm1, m512
+
To be added.
To be added.
@@ -7217,7 +7978,10 @@
To be added.
- __m512i _mm512_stream_load_si512 (__m512i const* mem_addr) VMOVNTDQA zmm1, m512
+
+ __m512i _mm512_stream_load_si512 (__m512i const* mem_addr)
+ VMOVNTDQA zmm1, m512
+
To be added.
To be added.
@@ -7241,7 +8005,10 @@
To be added.
- __m512i _mm512_stream_load_si512 (__m512i const* mem_addr) VMOVNTDQA zmm1, m512
+
+ __m512i _mm512_stream_load_si512 (__m512i const* mem_addr)
+ VMOVNTDQA zmm1, m512
+
To be added.
To be added.
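LoadAlignedVector512NonTemporal (VMOVNTDQA) additionally hints the cache not to retain the line, which helps large single-pass reads. A sketch (assumes AVX-512F; the cast reflects the header's non-const parameter):

#include <immintrin.h>

/* p must be 64-byte aligned; the data is not expected to be re-read soon. */
static __m512i stream_read(const __m512i *p)
{
    return _mm512_stream_load_si512((__m512i *)p);
}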
@@ -7265,7 +8032,10 @@
To be added.
- __m512i _mm512_loadu_si512 (__m512i const * mem_addr) VMOVDQU32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_si512 (__m512i const * mem_addr)
+ VMOVDQU32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7289,7 +8059,10 @@
To be added.
- __m512d _mm512_loadu_pd (double const * mem_addr) VMOVUPD zmm1 {k1}{z}, m512
+
+ __m512d _mm512_loadu_pd (double const * mem_addr)
+ VMOVUPD zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7313,7 +8086,10 @@
To be added.
- __m512i _mm512_loadu_si512 (__m512i const * mem_addr) VMOVDQU32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_si512 (__m512i const * mem_addr)
+ VMOVDQU32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7337,7 +8113,10 @@
To be added.
- __m512i _mm512_loadu_epi32 (__m512i const * mem_addr) VMOVDQU32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_epi32 (__m512i const * mem_addr)
+ VMOVDQU32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7361,7 +8140,10 @@
To be added.
- __m512i _mm512_loadu_epi64 (__m512i const * mem_addr) VMOVDQU64 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_epi64 (__m512i const * mem_addr)
+ VMOVDQU64 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7385,7 +8167,10 @@
To be added.
- __m512i _mm512_loadu_si512 (__m512i const * mem_addr) VMOVDQU32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_si512 (__m512i const * mem_addr)
+ VMOVDQU32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7409,7 +8194,10 @@
To be added.
- __m512 _mm512_loadu_ps (float const * mem_addr) VMOVUPS zmm1 {k1}{z}, m512
+
+ __m512 _mm512_loadu_ps (float const * mem_addr)
+ VMOVUPS zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7433,7 +8221,10 @@
To be added.
- __m512i _mm512_loadu_si512 (__m512i const * mem_addr) VMOVDQU32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_si512 (__m512i const * mem_addr)
+ VMOVDQU32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7457,7 +8248,10 @@
To be added.
- __m512i _mm512_loadu_epi32 (__m512i const * mem_addr) VMOVDQU32 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_epi32 (__m512i const * mem_addr)
+ VMOVDQU32 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7481,7 +8275,10 @@
To be added.
- __m512i _mm512_loadu_epi64 (__m512i const * mem_addr) VMOVDQU64 zmm1 {k1}{z}, m512
+
+ __m512i _mm512_loadu_epi64 (__m512i const * mem_addr)
+ VMOVDQU64 zmm1 {k1}{z}, m512
+
To be added.
To be added.
@@ -7508,7 +8305,10 @@
To be added.
To be added.
- __m512d _mm512_max_pd (__m512d a, __m512d b) VMAXPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}
+
+ __m512d _mm512_max_pd (__m512d a, __m512d b)
+ VMAXPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}
+
To be added.
To be added.
@@ -7535,7 +8335,10 @@
To be added.
To be added.
- __m512i _mm512_max_epi32 (__m512i a, __m512i b) VPMAXSD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_max_epi32 (__m512i a, __m512i b)
+ VPMAXSD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -7562,7 +8365,10 @@
To be added.
To be added.
- __m512i _mm512_max_epi64 (__m512i a, __m512i b) VPMAXSQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_max_epi64 (__m512i a, __m512i b)
+ VPMAXSQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -7589,7 +8395,10 @@
To be added.
To be added.
- __m512 _mm512_max_ps (__m512 a, __m512 b) VMAXPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}
+
+ __m512 _mm512_max_ps (__m512 a, __m512 b)
+ VMAXPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}
+
To be added.
To be added.
@@ -7616,7 +8425,10 @@
To be added.
To be added.
- __m512i _mm512_max_epu32 (__m512i a, __m512i b) VPMAXUD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_max_epu32 (__m512i a, __m512i b)
+ VPMAXUD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -7643,7 +8455,10 @@
To be added.
To be added.
- __m512i _mm512_max_epu64 (__m512i a, __m512i b) VPMAXUQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_max_epu64 (__m512i a, __m512i b)
+ VPMAXUQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -7670,7 +8485,10 @@
To be added.
To be added.
- __m512d _mm512_min_pd (__m512d a, __m512d b) VMINPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}
+
+ __m512d _mm512_min_pd (__m512d a, __m512d b)
+ VMINPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}
+
To be added.
To be added.
@@ -7697,7 +8515,10 @@
To be added.
To be added.
- __m512i _mm512_min_epi32 (__m512i a, __m512i b) VPMINSD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_min_epi32 (__m512i a, __m512i b)
+ VPMINSD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -7724,7 +8545,10 @@
To be added.
To be added.
- __m512i _mm512_min_epi64 (__m512i a, __m512i b) VPMINSQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_min_epi64 (__m512i a, __m512i b)
+ VPMINSQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -7751,7 +8575,10 @@
To be added.
To be added.
- __m512 _mm512_min_ps (__m512 a, __m512 b) VMINPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}
+
+ __m512 _mm512_min_ps (__m512 a, __m512 b)
+ VMINPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}
+
To be added.
To be added.
@@ -7778,7 +8605,10 @@
To be added.
To be added.
- __m512i _mm512_min_epu32 (__m512i a, __m512i b) VPMINUD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_min_epu32 (__m512i a, __m512i b)
+ VPMINUD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -7805,7 +8635,10 @@
To be added.
To be added.
- __m512i _mm512_min_epu64 (__m512i a, __m512i b) VPMINUQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_min_epu64 (__m512i a, __m512i b)
+ VPMINUQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
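Min and Max compose directly into a clamp, one instruction each. A sketch (assumes AVX-512F):

#include <immintrin.h>

/* Clamp 16 signed ints into [lo, hi]. */
static __m512i clamp_epi32(__m512i v, __m512i lo, __m512i hi)
{
    return _mm512_min_epi32(_mm512_max_epi32(v, lo), hi);
}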
@@ -7832,7 +8665,10 @@
To be added.
To be added.
- __m512d _mm512_mul_pd (__m512d a, __m512d b) VMULPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}
+
+ __m512d _mm512_mul_pd (__m512d a, __m512d b)
+ VMULPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}
+
To be added.
To be added.
@@ -7859,7 +8695,10 @@
To be added.
To be added.
- __m512i _mm512_mul_epi32 (__m512i a, __m512i b) VPMULDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_mul_epi32 (__m512i a, __m512i b)
+ VPMULDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -7886,7 +8725,10 @@
To be added.
To be added.
- __m512 _mm512_mul_ps (__m512 a, __m512 b) VMULPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}
+
+ __m512 _mm512_mul_ps (__m512 a, __m512 b)
+ VMULPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}
+
To be added.
To be added.
@@ -7913,7 +8755,10 @@
To be added.
To be added.
- __m512i _mm512_mul_epu32 (__m512i a, __m512i b) VPMULUDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_mul_epu32 (__m512i a, __m512i b)
+ VPMULUDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -7940,7 +8785,10 @@
To be added.
To be added.
- __m512i _mm512_mullo_epi32 (__m512i a, __m512i b) VPMULLD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_mullo_epi32 (__m512i a, __m512i b)
+ VPMULLD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -7967,7 +8815,10 @@
To be added.
To be added.
- __m512i _mm512_mullo_epi32 (__m512i a, __m512i b) VPMULLD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_mullo_epi32 (__m512i a, __m512i b)
+ VPMULLD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
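The two multiply shapes above differ in width: Multiply (VPMULDQ/VPMULUDQ) widens the even 32-bit elements into 64-bit products, while MultiplyLow (VPMULLD) keeps the low 32 bits of all sixteen products. A sketch (assumes AVX-512F):

#include <immintrin.h>

static __m512i widening_products(__m512i a, __m512i b)
{
    return _mm512_mul_epi32(a, b);    /* 8 x int64 from even lanes   */
}

static __m512i low_products(__m512i a, __m512i b)
{
    return _mm512_mullo_epi32(a, b);  /* 16 x int32, low halves only */
}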
@@ -7994,7 +8845,10 @@
To be added.
To be added.
- __m512i _mm512_or_si512 (__m512i a, __m512i b) VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_or_si512 (__m512i a, __m512i b)
+ VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8021,7 +8875,10 @@
To be added.
To be added.
- __m512i _mm512_or_si512 (__m512i a, __m512i b) VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_or_si512 (__m512i a, __m512i b)
+ VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8048,7 +8905,10 @@
To be added.
To be added.
- __m512i _mm512_or_epi32 (__m512i a, __m512i b) VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_or_epi32 (__m512i a, __m512i b)
+ VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8075,7 +8935,10 @@
To be added.
To be added.
- __m512i _mm512_or_epi64 (__m512i a, __m512i b) VPORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_or_epi64 (__m512i a, __m512i b)
+ VPORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -8102,7 +8965,10 @@
To be added.
To be added.
- __m512i _mm512_or_si512 (__m512i a, __m512i b) VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_or_si512 (__m512i a, __m512i b)
+ VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8129,7 +8995,10 @@
To be added.
To be added.
- __m512i _mm512_or_si512 (__m512i a, __m512i b) VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_or_si512 (__m512i a, __m512i b)
+ VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8156,7 +9025,10 @@
To be added.
To be added.
- __m512i _mm512_or_epi32 (__m512i a, __m512i b) VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_or_epi32 (__m512i a, __m512i b)
+ VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8183,7 +9055,10 @@
To be added.
To be added.
- __m512i _mm512_or_epi64 (__m512i a, __m512i b) VPORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_or_epi64 (__m512i a, __m512i b)
+ VPORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -8217,7 +9092,10 @@
To be added.
To be added.
- __m512d _mm512_permute_pd (__m512d a, int imm8) VPERMILPD zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
+ __m512d _mm512_permute_pd (__m512d a, int imm8)
+ VPERMILPD zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -8251,7 +9129,10 @@
To be added.
To be added.
- __m512 _mm512_permute_ps (__m512 a, int imm8) VPERMILPS zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
+ __m512 _mm512_permute_ps (__m512 a, int imm8)
+ VPERMILPS zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -8285,7 +9166,10 @@
To be added.
To be added.
- __m512d _mm512_permute4x64_pd (__m512d a, const int imm8) VPERMPD zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
+ __m512d _mm512_permute4x64_pd (__m512d a, const int imm8)
+ VPERMPD zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -8319,7 +9203,10 @@
To be added.
To be added.
- __m512i _mm512_permute4x64_epi64 (__m512i a, const int imm8) VPERMQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
+ __m512i _mm512_permute4x64_epi64 (__m512i a, const int imm8)
+ VPERMQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -8353,7 +9240,10 @@
To be added.
To be added.
- __m512i _mm512_permute4x64_epi64 (__m512i a, const int imm8) VPERMQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
+ __m512i _mm512_permute4x64_epi64 (__m512i a, const int imm8)
+ VPERMQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -8380,7 +9270,10 @@
To be added.
To be added.
- __m512i _mm512_permutevar16x32_epi32 (__m512i a, __m512i b) VPERMD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_permutevar16x32_epi32 (__m512i a, __m512i b)
+ VPERMD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8407,7 +9300,10 @@
To be added.
To be added.
- __m512 _mm512_permutevar16x32_ps (__m512 a, __m512i b) VPERMPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_permutevar16x32_ps (__m512 a, __m512i b)
+ VPERMPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8434,7 +9330,10 @@
To be added.
To be added.
- __m512i _mm512_permutevar16x32_epi32 (__m512i a, __m512i b) VPERMD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_permutevar16x32_epi32 (__m512i a, __m512i b)
+ VPERMD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8463,7 +9362,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_permutex2var_epi32 (__m512i a, __m512i idx, __m512i b) VPERMI2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst VPERMT2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_permutex2var_epi32 (__m512i a, __m512i idx, __m512i b)
+ VPERMI2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+ VPERMT2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8492,7 +9394,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_permutex2var_ps (__m512 a, __m512i idx, __m512i b) VPERMI2PS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst VPERMT2PS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_permutex2var_ps (__m512 a, __m512i idx, __m512 b)
+ VPERMI2PS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+ VPERMT2PS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8521,7 +9426,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_permutex2var_epi32 (__m512i a, __m512i idx, __m512i b) VPERMI2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst VPERMT2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_permutex2var_epi32 (__m512i a, __m512i idx, __m512i b)
+ VPERMI2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+ VPERMT2D zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8548,7 +9456,10 @@
To be added.
To be added.
- __m512d _mm512_permutevar_pd (__m512d a, __m512i b) VPERMILPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_permutevar_pd (__m512d a, __m512i b)
+ VPERMILPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -8575,7 +9486,10 @@
To be added.
To be added.
- __m512 _mm512_permutevar_ps (__m512 a, __m512i b) VPERMILPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_permutevar_ps (__m512 a, __m512i b)
+ VPERMILPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -8602,7 +9516,10 @@
To be added.
To be added.
- __m512d _mm512_permutevar8x64_pd (__m512d a, __m512i b) VPERMPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_permutevar8x64_pd (__m512d a, __m512i b)
+ VPERMPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -8629,7 +9546,10 @@
To be added.
To be added.
- __m512i _mm512_permutevar8x64_epi64 (__m512i a, __m512i b) VPERMQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_permutevar8x64_epi64 (__m512i a, __m512i b)
+ VPERMQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -8656,7 +9576,10 @@
To be added.
To be added.
- __m512i _mm512_permutevar8x64_epi64 (__m512i a, __m512i b) VPERMQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_permutevar8x64_epi64 (__m512i a, __m512i b)
+ VPERMQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -8685,7 +9608,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_permutex2var_pd (__m512d a, __m512i idx, __m512i b) VPERMI2PD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst VPERMT2PD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_permutex2var_pd (__m512d a, __m512i idx, __m512d b)
+ VPERMI2PD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+ VPERMT2PD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -8714,7 +9640,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_permutex2var_epi64 (__m512i a, __m512i idx, __m512i b) VPERMI2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst VPERMT2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_permutex2var_epi64 (__m512i a, __m512i idx, __m512i b)
+ VPERMI2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+ VPERMT2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -8743,7 +9672,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_permutex2var_epi64 (__m512i a, __m512i idx, __m512i b) VPERMI2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst VPERMT2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_permutex2var_epi64 (__m512i a, __m512i idx, __m512i b)
+ VPERMI2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+ VPERMT2Q zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
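PermuteVar...x2 (VPERMI2D/VPERMT2D and friends) indexes into the concatenation of two vectors; for the 32-bit form, bit 4 of each index selects the source. A sketch interleaving two vectors (assumes AVX-512F):

#include <immintrin.h>

/* result = a[0], b[0], a[1], b[1], ... : indices 16+k address b[k]. */
static __m512i interleave_low(__m512i a, __m512i b)
{
    const __m512i idx = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4,
                                         19, 3, 18, 2, 17, 1, 16, 0);
    return _mm512_permutex2var_epi32(a, idx, b);
}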
@@ -8768,7 +9700,10 @@
To be added.
- __m512d _mm512_rcp14_pd (__m512d a, __m512d b) VRCP14PD zmm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m512d _mm512_rcp14_pd (__m512d a)
+ VRCP14PD zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -8793,7 +9728,10 @@
To be added.
- __m512 _mm512_rcp14_ps (__m512 a, __m512 b) VRCP14PS zmm1 {k1}{z}, zmm2/m512/m32bcst
+
+ __m512 _mm512_rcp14_ps (__m512 a)
+ VRCP14PS zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -8818,7 +9756,10 @@
To be added.
- __m128d _mm_rcp14_sd (__m128d a) VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64
+
+ __m128d _mm_rcp14_sd (__m128d a)
+ VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64
+
To be added.
To be added.
@@ -8843,7 +9784,10 @@
To be added.
- __m128 _mm_rcp14_ss (__m128 a) VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32
+
+ __m128 _mm_rcp14_ss (__m128 a)
+ VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32
+
To be added.
To be added.
@@ -8870,7 +9814,10 @@
To be added.
To be added.
- __m128d _mm_rcp14_sd (__m128d a, __m128d b) VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128d _mm_rcp14_sd (__m128d a, __m128d b)
+ VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -8897,7 +9844,10 @@
To be added.
To be added.
- __m128 _mm_rcp14_ss (__m128 a, __m128 b) VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128 _mm_rcp14_ss (__m128 a, __m128 b)
+ VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -8922,7 +9872,10 @@
To be added.
- __m512d _mm512_rsqrt14_pd (__m512d a, __m512d b) VRSQRT14PD zmm1 {k1}{z}, zmm2/m512/m64bcst
+
+ __m512d _mm512_rsqrt14_pd (__m512d a)
+ VRSQRT14PD zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -8947,7 +9900,10 @@
To be added.
- __m512 _mm512_rsqrt14_ps (__m512 a, __m512 b) VRSQRT14PS zmm1 {k1}{z}, zmm2/m512/m32bcst
+
+ __m512 _mm512_rsqrt14_ps (__m512 a)
+ VRSQRT14PS zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -8972,7 +9928,10 @@
To be added.
- __m128d _mm_rsqrt14_sd (__m128d a) VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64
+
+ __m128d _mm_rsqrt14_sd (__m128d a)
+ VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64
+
To be added.
To be added.
@@ -8997,7 +9956,10 @@
To be added.
- __m128 _mm_rsqrt14_ss (__m128 a) VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32
+
+ __m128 _mm_rsqrt14_ss (__m128 a)
+ VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32
+
To be added.
To be added.
@@ -9024,7 +9986,10 @@
To be added.
To be added.
- __m128d _mm_rsqrt14_sd (__m128d a, __m128d b) VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128d _mm_rsqrt14_sd (__m128d a, __m128d b)
+ VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -9051,7 +10016,10 @@
To be added.
To be added.
- __m128 _mm_rsqrt14_ss (__m128 a, __m128 b) VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128 _mm_rsqrt14_ss (__m128 a, __m128 b)
+ VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
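Reciprocal14 and ReciprocalSqrt14 return approximations good to about 2^-14 relative error; one Newton-Raphson step recovers near-full single precision. A sketch (assumes AVX-512F):

#include <immintrin.h>

static __m512 fast_rsqrt(__m512 x)
{
    __m512 y = _mm512_rsqrt14_ps(x);               /* ~14-bit estimate */
    /* One refinement: y' = y * (1.5 - 0.5 * x * y * y). */
    __m512 xyy = _mm512_mul_ps(_mm512_mul_ps(x, y), y);
    return _mm512_mul_ps(y, _mm512_fnmadd_ps(_mm512_set1_ps(0.5f), xyy,
                                             _mm512_set1_ps(1.5f)));
}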
@@ -9085,7 +10053,10 @@
To be added.
To be added.
- __m512i _mm512_rol_epi32 (__m512i a, int imm8) VPROLD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
+ __m512i _mm512_rol_epi32 (__m512i a, int imm8)
+ VPROLD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -9119,7 +10090,10 @@
To be added.
To be added.
- __m512i _mm512_rol_epi64 (__m512i a, int imm8) VPROLQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
+ __m512i _mm512_rol_epi64 (__m512i a, int imm8)
+ VPROLQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -9153,7 +10127,10 @@
To be added.
To be added.
- __m512i _mm512_rol_epi32 (__m512i a, int imm8) VPROLD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
+ __m512i _mm512_rol_epi32 (__m512i a, int imm8)
+ VPROLD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -9187,7 +10164,10 @@
To be added.
To be added.
- __m512i _mm512_rol_epi64 (__m512i a, int imm8) VPROLQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
+ __m512i _mm512_rol_epi64 (__m512i a, int imm8)
+ VPROLQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -9214,7 +10194,10 @@
To be added.
To be added.
- __m512i _mm512_rolv_epi32 (__m512i a, __m512i b) VPROLDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_rolv_epi32 (__m512i a, __m512i b)
+ VPROLDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -9241,7 +10224,10 @@
To be added.
To be added.
- __m512i _mm512_rolv_epi64 (__m512i a, __m512i b) VPROLQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_rolv_epi64 (__m512i a, __m512i b)
+ VPROLQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -9268,7 +10254,10 @@
To be added.
To be added.
- __m512i _mm512_rolv_epi32 (__m512i a, __m512i b) VPROLDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_rolv_epi32 (__m512i a, __m512i b)
+ VPROLDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -9295,7 +10284,10 @@
To be added.
To be added.
- __m512i _mm512_rolv_epi64 (__m512i a, __m512i b) VPROLQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_rolv_epi64 (__m512i a, __m512i b)
+ VPROLQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -9329,7 +10321,10 @@
To be added.
To be added.
- __m512i _mm512_ror_epi32 (__m512i a, int imm8) VPRORD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
+ __m512i _mm512_ror_epi32 (__m512i a, int imm8)
+ VPRORD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -9363,7 +10358,10 @@
To be added.
To be added.
- __m512i _mm512_ror_epi64 (__m512i a, int imm8) VPRORQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
+ __m512i _mm512_ror_epi64 (__m512i a, int imm8)
+ VPRORQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -9397,7 +10395,10 @@
To be added.
To be added.
- __m512i _mm512_ror_epi32 (__m512i a, int imm8) VPRORD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
+ __m512i _mm512_ror_epi32 (__m512i a, int imm8)
+ VPRORD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -9431,7 +10432,10 @@
To be added.
To be added.
- __m512i _mm512_ror_epi64 (__m512i a, int imm8) VPRORQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
+ __m512i _mm512_ror_epi64 (__m512i a, int imm8)
+ VPRORQ zmm1 {k1}{z}, zmm2/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -9458,7 +10462,10 @@
To be added.
To be added.
- __m512i _mm512_rorv_epi32 (__m512i a, __m512i b) VPRORDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_rorv_epi32 (__m512i a, __m512i b)
+ VPRORDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -9485,7 +10492,10 @@
To be added.
To be added.
- __m512i _mm512_rorv_epi64 (__m512i a, __m512i b) VPRORQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_rorv_epi64 (__m512i a, __m512i b)
+ VPRORQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -9512,7 +10522,10 @@
To be added.
To be added.
- __m512i _mm512_rorv_epi32 (__m512i a, __m512i b) VPRORDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_rorv_epi32 (__m512i a, __m512i b)
+ VPRORDV zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -9539,7 +10552,10 @@
To be added.
To be added.
- __m512i _mm512_rorv_epi64 (__m512i a, __m512i b) VPRORQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_rorv_epi64 (__m512i a, __m512i b)
+ VPRORQV zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
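Native rotates replace the usual shift/shift/or triple; the RotateLeftVariable forms take a per-element count. A sketch (assumes AVX-512F):

#include <immintrin.h>

static __m512i rotl7(__m512i v)                 /* fixed count   */
{
    return _mm512_rol_epi32(v, 7);
}

static __m512i rotl_each(__m512i v, __m512i n)  /* per-element   */
{
    return _mm512_rolv_epi32(v, n);
}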
@@ -9573,7 +10589,10 @@
To be added.
To be added.
- __m512d _mm512_roundscale_pd (__m512d a, int imm) VRNDSCALEPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}, imm8
+
+ __m512d _mm512_roundscale_pd (__m512d a, int imm)
+ VRNDSCALEPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}, imm8
+
To be added.
To be added.
@@ -9607,7 +10626,10 @@
To be added.
To be added.
- __m512 _mm512_roundscale_ps (__m512 a, int imm) VRNDSCALEPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}, imm8
+
+ __m512 _mm512_roundscale_ps (__m512 a, int imm)
+ VRNDSCALEPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}, imm8
+
To be added.
To be added.
@@ -9641,7 +10663,10 @@
To be added.
To be added.
- __m128d _mm_roundscale_sd (__m128d a, int imm) VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
+ __m128d _mm_roundscale_sd (__m128d a, int imm)
+ VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
To be added.
To be added.
@@ -9675,7 +10700,10 @@
To be added.
To be added.
- __m128 _mm_roundscale_ss (__m128 a, int imm) VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
+ __m128 _mm_roundscale_ss (__m128 a, int imm)
+ VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
To be added.
To be added.
@@ -9711,7 +10739,10 @@
To be added.
To be added.
To be added.
- __m128d _mm_roundscale_sd (__m128d a, __m128d b, int imm) VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128d _mm_roundscale_sd (__m128d a, __m128d b, int imm)
+ VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -9747,7 +10778,10 @@
To be added.
To be added.
To be added.
- __m128 _mm_roundscale_ss (__m128 a, __m128 b, int imm) VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
+ __m128 _mm_roundscale_ss (__m128 a, __m128 b, int imm)
+ VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
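RoundScale rounds to 2^-M precision, where M sits in imm8[7:4]; with M = 0 it degenerates to a plain round-to-integer. A sketch (assumes AVX-512F; _MM_FROUND_NO_EXC sets the suppress-precision-exception bit):

#include <immintrin.h>

static __m512d round_to_int(__m512d x)
{
    return _mm512_roundscale_pd(x, _MM_FROUND_TO_NEAREST_INT |
                                   _MM_FROUND_NO_EXC);
}

static __m512d round_to_quarters(__m512d x)   /* nearest multiple of 0.25 */
{
    return _mm512_roundscale_pd(x, (2 << 4) | _MM_FROUND_TO_NEAREST_INT |
                                   _MM_FROUND_NO_EXC);
}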
@@ -9774,7 +10808,10 @@
To be added.
To be added.
- __m512d _mm512_scalef_pd (__m512d a, __m512d b) VSCALEFPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}
+
+ __m512d _mm512_scalef_pd (__m512d a, __m512d b)
+ VSCALEFPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}
+
To be added.
To be added.
@@ -9801,7 +10838,10 @@
To be added.
To be added.
- __m512 _mm512_scalef_ps (__m512 a, __m512 b) VSCALEFPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}
+
+ __m512 _mm512_scalef_ps (__m512 a, __m512 b)
+ VSCALEFPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}
+
To be added.
To be added.
@@ -9828,7 +10868,10 @@
To be added.
To be added.
- __m128d _mm_scalef_sd (__m128d a, __m128d b) VSCALEFSD xmm1 {k1}{z}, xmm2, xmm3/m64{er}
+
+ __m128d _mm_scalef_sd (__m128d a, __m128d b)
+ VSCALEFSD xmm1 {k1}{z}, xmm2, xmm3/m64{er}
+
To be added.
To be added.
@@ -9855,7 +10898,10 @@
To be added.
To be added.
- __m128 _mm_scalef_ss (__m128 a, __m128 b) VSCALEFSS xmm1 {k1}{z}, xmm2, xmm3/m32{er}
+
+ __m128 _mm_scalef_ss (__m128 a, __m128 b)
+ VSCALEFSS xmm1 {k1}{z}, xmm2, xmm3/m32{er}
+
To be added.
To be added.
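Scale computes a * 2^floor(b), i.e. a vectorized ldexp with the exponent supplied as a floating-point vector. A sketch (assumes AVX-512F):

#include <immintrin.h>

static __m512d ldexp8(__m512d a, __m512d e)
{
    return _mm512_scalef_pd(a, e);   /* a[i] * 2^floor(e[i]) */
}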
@@ -9889,7 +10935,10 @@
To be added.
To be added.
- __m512i _mm512_slli_epi32 (__m512i a, int imm8) VPSLLD zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_slli_epi32 (__m512i a, int imm8)
+ VPSLLD zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -9916,7 +10965,10 @@
To be added.
To be added.
- __m512i _mm512_sll_epi32 (__m512i a, __m128i count) VPSLLD zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_sll_epi32 (__m512i a, __m128i count)
+ VPSLLD zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -9950,7 +11002,10 @@
To be added.
To be added.
- __m512i _mm512_slli_epi64 (__m512i a, int imm8) VPSLLQ zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_slli_epi64 (__m512i a, int imm8)
+ VPSLLQ zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -9977,7 +11032,10 @@
To be added.
To be added.
- __m512i _mm512_sll_epi64 (__m512i a, __m128i count) VPSLLQ zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_sll_epi64 (__m512i a, __m128i count)
+ VPSLLQ zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -10011,7 +11069,10 @@
To be added.
To be added.
- __m512i _mm512_slli_epi32 (__m512i a, int imm8) VPSLLD zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_slli_epi32 (__m512i a, int imm8)
+ VPSLLD zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -10038,7 +11099,10 @@
To be added.
To be added.
- __m512i _mm512_sll_epi32 (__m512i a, __m128i count) VPSLLD zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_sll_epi32 (__m512i a, __m128i count)
+ VPSLLD zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -10072,7 +11136,10 @@
To be added.
To be added.
- __m512i _mm512_slli_epi64 (__m512i a, int imm8) VPSLLQ zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_slli_epi64 (__m512i a, int imm8)
+ VPSLLQ zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -10099,7 +11166,10 @@
To be added.
To be added.
- __m512i _mm512_sll_epi64 (__m512i a, __m128i count) VPSLLQ zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_sll_epi64 (__m512i a, __m128i count)
+ VPSLLQ zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -10126,7 +11196,10 @@
To be added.
To be added.
- __m512i _mm512_sllv_epi32 (__m512i a, __m512i count) VPSLLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_sllv_epi32 (__m512i a, __m512i count)
+ VPSLLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -10153,7 +11226,10 @@
To be added.
To be added.
- __m512i _mm512_sllv_epi64 (__m512i a, __m512i count) VPSLLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_sllv_epi64 (__m512i a, __m512i count)
+ VPSLLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -10180,7 +11256,10 @@
To be added.
To be added.
- __m512i _mm512_sllv_epi32 (__m512i a, __m512i count) VPSLLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_sllv_epi32 (__m512i a, __m512i count)
+ VPSLLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -10207,7 +11286,10 @@
To be added.
To be added.
- __m512i _mm512_sllv_epi64 (__m512i a, __m512i count) VPSLLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_sllv_epi64 (__m512i a, __m512i count)
+ VPSLLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -10241,7 +11323,10 @@
To be added.
To be added.
- __m512i _mm512_srai_epi32 (__m512i a, int imm8) VPSRAD zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_srai_epi32 (__m512i a, int imm8)
+ VPSRAD zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -10268,7 +11353,10 @@
To be added.
To be added.
- _mm512_sra_epi32 (__m512i a, __m128i count) VPSRAD zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_sra_epi32 (__m512i a, __m128i count)
+ VPSRAD zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -10302,7 +11390,10 @@
To be added.
To be added.
- __m512i _mm512_srai_epi64 (__m512i a, int imm8) VPSRAQ zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_srai_epi64 (__m512i a, int imm8)
+ VPSRAQ zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -10329,7 +11420,10 @@
To be added.
To be added.
- _mm512_sra_epi64 (__m512i a, __m128i count) VPSRAQ zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_sra_epi64 (__m512i a, __m128i count)
+ VPSRAQ zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -10356,7 +11450,10 @@
To be added.
To be added.
- __m512i _mm512_srav_epi32 (__m512i a, __m512i count) VPSRAVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_srav_epi32 (__m512i a, __m512i count)
+ VPSRAVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -10383,7 +11480,10 @@
To be added.
To be added.
- __m512i _mm512_srav_epi64 (__m512i a, __m512i count) VPSRAVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_srav_epi64 (__m512i a, __m512i count)
+ VPSRAVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -10417,7 +11517,10 @@
To be added.
To be added.
- __m512i _mm512_srli_epi32 (__m512i a, int imm8) VPSRLD zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_srli_epi32 (__m512i a, int imm8)
+ VPSRLD zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -10444,7 +11547,10 @@
To be added.
To be added.
- __m512i _mm512_srl_epi32 (__m512i a, __m128i count) VPSRLD zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_srl_epi32 (__m512i a, __m128i count)
+ VPSRLD zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -10478,7 +11584,10 @@
To be added.
To be added.
- __m512i _mm512_srli_epi64 (__m512i a, int imm8) VPSRLQ zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_srli_epi64 (__m512i a, int imm8)
+ VPSRLQ zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -10505,7 +11614,10 @@
To be added.
To be added.
- __m512i _mm512_srl_epi64 (__m512i a, __m128i count) VPSRLQ zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_srl_epi64 (__m512i a, __m128i count)
+ VPSRLQ zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -10539,7 +11651,10 @@
To be added.
To be added.
- __m512i _mm512_srli_epi32 (__m512i a, int imm8) VPSRLD zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_srli_epi32 (__m512i a, int imm8)
+ VPSRLD zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -10566,7 +11681,10 @@
To be added.
To be added.
- __m512i _mm512_srl_epi32 (__m512i a, __m128i count) VPSRLD zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_srl_epi32 (__m512i a, __m128i count)
+ VPSRLD zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -10600,7 +11718,10 @@
To be added.
To be added.
- __m512i _mm512_srli_epi64 (__m512i a, int imm8) VPSRLQ zmm1 {k1}{z}, zmm2, imm8
+
+ __m512i _mm512_srli_epi64 (__m512i a, int imm8)
+ VPSRLQ zmm1 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -10627,7 +11748,10 @@
To be added.
To be added.
- __m512i _mm512_srl_epi64 (__m512i a, __m128i count) VPSRLQ zmm1 {k1}{z}, zmm2, xmm3/m128
+
+ __m512i _mm512_srl_epi64 (__m512i a, __m128i count)
+ VPSRLQ zmm1 {k1}{z}, zmm2, xmm3/m128
+
To be added.
To be added.
@@ -10654,7 +11778,10 @@
To be added.
To be added.
- __m512i _mm512_srlv_epi32 (__m512i a, __m512i count) VPSRLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_srlv_epi32 (__m512i a, __m512i count)
+ VPSRLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -10681,7 +11808,10 @@
To be added.
To be added.
- __m512i _mm512_srlv_epi64 (__m512i a, __m512i count) VPSRLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_srlv_epi64 (__m512i a, __m512i count)
+ VPSRLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -10708,7 +11838,10 @@
To be added.
To be added.
- __m512i _mm512_srlv_epi32 (__m512i a, __m512i count) VPSRLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_srlv_epi32 (__m512i a, __m512i count)
+ VPSRLVD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -10735,7 +11868,10 @@
To be added.
To be added.
- __m512i _mm512_srlv_epi64 (__m512i a, __m512i count) VPSRLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_srlv_epi64 (__m512i a, __m512i count)
+ VPSRLVQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
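The shift family comes in three shapes: an immediate count, a uniform count taken from an xmm register, and a per-element count vector. A sketch of the first and third (assumes AVX-512F):

#include <immintrin.h>

static __m512i div_by_8_u32(__m512i v)            /* immediate count   */
{
    return _mm512_srli_epi32(v, 3);
}

static __m512i shift_each(__m512i v, __m512i n)   /* per-element count */
{
    return _mm512_srlv_epi32(v, n);
}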
@@ -10769,7 +11905,10 @@
To be added.
To be added.
- __m512i _mm512_shuffle_epi32 (__m512i a, const int imm8) VPSHUFD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
+ __m512i _mm512_shuffle_epi32 (__m512i a, const int imm8)
+ VPSHUFD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -10803,7 +11942,10 @@
To be added.
To be added.
- __m512i _mm512_shuffle_epi32 (__m512i a, const int imm8) VPSHUFD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
+ __m512i _mm512_shuffle_epi32 (__m512i a, const int imm8)
+ VPSHUFD zmm1 {k1}{z}, zmm2/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -10839,7 +11981,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_shuffle_pd (__m512d a, __m512d b, const int imm8) VSHUFPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
+ __m512d _mm512_shuffle_pd (__m512d a, __m512d b, const int imm8)
+ VSHUFPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -10875,7 +12020,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_shuffle_ps (__m512 a, __m512 b, const int imm8) VSHUFPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
+ __m512 _mm512_shuffle_ps (__m512 a, __m512 b, const int imm8)
+ VSHUFPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -10911,7 +12059,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_shuffle_f64x2 (__m512d a, __m512d b, const int imm8) VSHUFF64x2 zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
+ __m512d _mm512_shuffle_f64x2 (__m512d a, __m512d b, const int imm8)
+ VSHUFF64x2 zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -10947,7 +12098,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_shuffle_i32x4 (__m512i a, __m512i b, const int imm8) VSHUFI32x4 zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
+ __m512i _mm512_shuffle_i32x4 (__m512i a, __m512i b, const int imm8)
+ VSHUFI32x4 zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -10983,7 +12137,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_shuffle_i64x2 (__m512i a, __m512i b, const int imm8) VSHUFI64x2 zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
+ __m512i _mm512_shuffle_i64x2 (__m512i a, __m512i b, const int imm8)
+ VSHUFI64x2 zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -11019,7 +12176,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_shuffle_f32x4 (__m512 a, __m512 b, const int imm8) VSHUFF32x4 zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
+ __m512 _mm512_shuffle_f32x4 (__m512 a, __m512 b, const int imm8)
+ VSHUFF32x4 zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -11055,7 +12215,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_shuffle_i32x4 (__m512i a, __m512i b, const int imm8) VSHUFI32x4 zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
+ __m512i _mm512_shuffle_i32x4 (__m512i a, __m512i b, const int imm8)
+ VSHUFI32x4 zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -11091,7 +12254,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_shuffle_i64x2 (__m512i a, __m512i b, const int imm8) VSHUFI64x2 zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
+ __m512i _mm512_shuffle_i64x2 (__m512i a, __m512i b, const int imm8)
+ VSHUFI64x2 zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
To be added.
To be added.
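Unlike VPSHUFD, the VSHUFF/VSHUFI entries move whole 128-bit blocks. A hedged sketch, assuming the managed name `Avx512F.Shuffle4x128` covers these instructions (the control value and names are ours):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<int> v = Vector512.Create(0, 0, 0, 0, 1, 1, 1, 1,
                                        2, 2, 2, 2, 3, 3, 3, 3);
    // Fields 0..1 of the control pick blocks from 'left', fields 2..3 from 'right'.
    // 0b01_00_11_10 yields blocks [2, 3, 0, 1] when both sources are 'v'.
    Vector512<int> r = Avx512F.Shuffle4x128(v, v, 0b01_00_11_10);
    Console.WriteLine(r.GetElement(0)); // 2
}
```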
@@ -11116,7 +12282,10 @@
To be added.
- __m512d _mm512_sqrt_pd (__m512d a) VSQRTPD zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
+ __m512d _mm512_sqrt_pd (__m512d a)
+ VSQRTPD zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -11141,7 +12310,10 @@
To be added.
- __m512 _mm512_sqrt_ps (__m512 a) VSQRTPS zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
+ __m512 _mm512_sqrt_ps (__m512 a)
+ VSQRTPS zmm1 {k1}{z}, zmm2/m512/m32bcst{er}
+
To be added.
To be added.
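The sqrt entries compute one square root per element. A minimal sketch (names ours):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<double> v = Vector512.Create(1.0, 4.0, 9.0, 16.0, 25.0, 36.0, 49.0, 64.0);
    Vector512<double> r = Avx512F.Sqrt(v); // VSQRTPD over eight doubles at once
    Console.WriteLine(r.GetElement(3));    // 4
}
```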
@@ -11167,7 +12339,10 @@
To be added.
To be added.
- void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a) VMOVDQU32 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a)
+ VMOVDQU32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11192,7 +12367,10 @@
To be added.
To be added.
- void _mm512_storeu_pd (double * mem_addr, __m512d a) VMOVUPD m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_pd (double * mem_addr, __m512d a)
+ VMOVUPD m512 {k1}{z}, zmm1
+
To be added.
@@ -11217,7 +12395,10 @@
To be added.
To be added.
- void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a) VMOVDQU32 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a)
+ VMOVDQU32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11242,7 +12423,10 @@
To be added.
To be added.
- void _mm512_storeu_epi32 (__m512i * mem_addr, __m512i a) VMOVDQU32 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_epi32 (__m512i * mem_addr, __m512i a)
+ VMOVDQU32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11267,7 +12451,10 @@
To be added.
To be added.
- void _mm512_storeu_epi64 (__m512i * mem_addr, __m512i a) VMOVDQU64 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_epi64 (__m512i * mem_addr, __m512i a)
+ VMOVDQU64 m512 {k1}{z}, zmm1
+
To be added.
@@ -11292,7 +12479,10 @@
To be added.
To be added.
- void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a) VMOVDQU32 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a)
+ VMOVDQU32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11317,7 +12507,10 @@
To be added.
To be added.
- void _mm512_storeu_ps (float * mem_addr, __m512 a) VMOVUPS m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_ps (float * mem_addr, __m512 a)
+ VMOVUPS m512 {k1}{z}, zmm1
+
To be added.
@@ -11342,7 +12535,10 @@
To be added.
To be added.
- void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a) VMOVDQU32 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_si512 (__m512i * mem_addr, __m512i a)
+ VMOVDQU32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11367,7 +12563,10 @@
To be added.
To be added.
- void _mm512_storeu_epi32 (__m512i * mem_addr, __m512i a) VMOVDQU32 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_epi32 (__m512i * mem_addr, __m512i a)
+ VMOVDQU32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11392,7 +12591,10 @@
To be added.
To be added.
- void _mm512_storeu_epi64 (__m512i * mem_addr, __m512i a) VMOVDQU64 m512 {k1}{z}, zmm1
+
+ void _mm512_storeu_epi64 (__m512i * mem_addr, __m512i a)
+ VMOVDQU64 m512 {k1}{z}, zmm1
+
To be added.
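The storeu entries use VMOVDQU32/VMOVDQU64/VMOVUPD/VMOVUPS, which tolerate any address alignment. A sketch, assuming `Avx512F.Store` is the unaligned-store entry point and compiling with unsafe blocks enabled:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    unsafe
    {
        int* buffer = stackalloc int[16];           // no alignment guarantee needed
        Avx512F.Store(buffer, Vector512.Create(7)); // VMOVDQU32: safe at any address
        Console.WriteLine(buffer[15]);              // 7
    }
}
```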
@@ -11417,7 +12619,10 @@
To be added.
To be added.
- void _mm512_store_si512 (__m512i * mem_addr, __m512i a) VMOVDQA32 m512 {k1}{z}, zmm1
+
+ void _mm512_store_si512 (__m512i * mem_addr, __m512i a)
+ VMOVDQA32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11442,7 +12647,10 @@
To be added.
To be added.
- void _mm512_store_pd (double * mem_addr, __m512d a) VMOVAPD m512 {k1}{z}, zmm1
+
+ void _mm512_store_pd (double * mem_addr, __m512d a)
+ VMOVAPD m512 {k1}{z}, zmm1
+
To be added.
@@ -11467,7 +12675,10 @@
To be added.
To be added.
- void _mm512_store_si512 (__m512i * mem_addr, __m512i a) VMOVDQA32 m512 {k1}{z}, zmm1
+
+ void _mm512_store_si512 (__m512i * mem_addr, __m512i a)
+ VMOVDQA32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11492,7 +12703,10 @@
To be added.
To be added.
- void _mm512_store_epi32 (__m512i * mem_addr, __m512i a) VMOVDQA32 m512 {k1}{z}, zmm1
+
+ void _mm512_store_epi32 (__m512i * mem_addr, __m512i a)
+ VMOVDQA32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11517,7 +12731,10 @@
To be added.
To be added.
- void _mm512_store_epi64 (__m512i * mem_addr, __m512i a) VMOVDQA32 m512 {k1}{z}, zmm1
+
+ void _mm512_store_epi64 (__m512i * mem_addr, __m512i a)
+ VMOVDQA64 m512 {k1}{z}, zmm1
+
To be added.
@@ -11542,7 +12759,10 @@
To be added.
To be added.
- void _mm512_store_si512 (__m512i * mem_addr, __m512i a) VMOVDQA32 m512 {k1}{z}, zmm1
+
+ void _mm512_store_si512 (__m512i * mem_addr, __m512i a)
+ VMOVDQA32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11567,7 +12787,10 @@
To be added.
To be added.
- void _mm512_store_ps (float * mem_addr, __m512 a) VMOVAPS m512 {k1}{z}, zmm1
+
+ void _mm512_store_ps (float * mem_addr, __m512 a)
+ VMOVAPS m512 {k1}{z}, zmm1
+
To be added.
@@ -11592,7 +12815,10 @@
To be added.
To be added.
- void _mm512_store_si512 (__m512i * mem_addr, __m512i a) VMOVDQA32 m512 {k1}{z}, zmm1
+
+ void _mm512_store_si512 (__m512i * mem_addr, __m512i a)
+ VMOVDQA32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11617,7 +12843,10 @@
To be added.
To be added.
- void _mm512_store_epi32 (__m512i * mem_addr, __m512i a) VMOVDQA32 m512 {k1}{z}, zmm1
+
+ void _mm512_store_epi32 (__m512i * mem_addr, __m512i a)
+ VMOVDQA32 m512 {k1}{z}, zmm1
+
To be added.
@@ -11642,7 +12871,10 @@
To be added.
To be added.
- void _mm512_store_epi64 (__m512i * mem_addr, __m512i a) VMOVDQA32 m512 {k1}{z}, zmm1
+
+ void _mm512_store_epi64 (__m512i * mem_addr, __m512i a)
+ VMOVDQA64 m512 {k1}{z}, zmm1
+
To be added.
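The aligned forms (VMOVDQA32/VMOVDQA64/VMOVAPD/VMOVAPS) fault unless the address is 64-byte aligned. A sketch using `NativeMemory.AlignedAlloc` to satisfy that requirement (names ours; unsafe blocks assumed enabled):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
using System.Runtime.InteropServices;

if (Avx512F.IsSupported)
{
    unsafe
    {
        int* p = (int*)NativeMemory.AlignedAlloc(64, 64); // 64 bytes, 64-byte aligned
        Avx512F.StoreAligned(p, Vector512.Create(1));     // VMOVDQA32 would fault if misaligned
        Console.WriteLine(p[0]);                          // 1
        NativeMemory.AlignedFree(p);
    }
}
```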
@@ -11667,7 +12899,10 @@
To be added.
To be added.
- void _mm512_stream_si512 (__m512i * mem_addr, __m512i a) VMOVNTDQ m512, zmm1
+
+ void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)
+ VMOVNTDQ m512, zmm1
+
To be added.
@@ -11692,7 +12927,10 @@
To be added.
To be added.
- void _mm512_stream_pd (double * mem_addr, __m512d a) VMOVNTPD m512, zmm1
+
+ void _mm512_stream_pd (double * mem_addr, __m512d a)
+ VMOVNTPD m512, zmm1
+
To be added.
@@ -11717,7 +12955,10 @@
To be added.
To be added.
- void _mm512_stream_si512 (__m512i * mem_addr, __m512i a) VMOVNTDQ m512, zmm1
+
+ void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)
+ VMOVNTDQ m512, zmm1
+
To be added.
@@ -11742,7 +12983,10 @@
To be added.
To be added.
- void _mm512_stream_si512 (__m512i * mem_addr, __m512i a) VMOVNTDQ m512, zmm1
+
+ void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)
+ VMOVNTDQ m512, zmm1
+
To be added.
@@ -11767,7 +13011,10 @@
To be added.
To be added.
- void _mm512_stream_si512 (__m512i * mem_addr, __m512i a) VMOVNTDQ m512, zmm1
+
+ void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)
+ VMOVNTDQ m512, zmm1
+
To be added.
@@ -11792,7 +13039,10 @@
To be added.
To be added.
- void _mm512_stream_si512 (__m512i * mem_addr, __m512i a) VMOVNTDQ m512, zmm1
+
+ void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)
+ VMOVNTDQ m512, zmm1
+
To be added.
@@ -11817,7 +13067,10 @@
To be added.
To be added.
- void _mm512_stream_ps (float * mem_addr, __m512 a) VMOVNTPS m512, zmm1
+
+ void _mm512_stream_ps (float * mem_addr, __m512 a)
+ VMOVNTPS m512, zmm1
+
To be added.
@@ -11842,7 +13095,10 @@
To be added.
To be added.
- void _mm512_stream_si512 (__m512i * mem_addr, __m512i a) VMOVNTDQ m512, zmm1
+
+ void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)
+ VMOVNTDQ m512, zmm1
+
To be added.
@@ -11867,7 +13123,10 @@
To be added.
To be added.
- void _mm512_stream_si512 (__m512i * mem_addr, __m512i a) VMOVNTDQ m512, zmm1
+
+ void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)
+ VMOVNTDQ m512, zmm1
+
To be added.
@@ -11892,7 +13151,10 @@
To be added.
To be added.
- void _mm512_stream_si512 (__m512i * mem_addr, __m512i a) VMOVNTDQ m512, zmm1
+
+ void _mm512_stream_si512 (__m512i * mem_addr, __m512i a)
+ VMOVNTDQ m512, zmm1
+
To be added.
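The stream entries (VMOVNTDQ/VMOVNTPD/VMOVNTPS) are non-temporal: they write around the cache, which pays off for large buffers that will not be re-read soon. A sketch; the fence stands in for the SFENCE ordering non-temporal stores normally need before other threads read the data:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
using System.Runtime.InteropServices;
using System.Threading;

if (Avx512F.IsSupported)
{
    unsafe
    {
        int* p = (int*)NativeMemory.AlignedAlloc(64, 64);      // must still be 64-byte aligned
        Avx512F.StoreAlignedNonTemporal(p, Vector512.Create(9)); // bypasses the cache hierarchy
        Thread.MemoryBarrier();                                  // publish before cross-thread reads
        Console.WriteLine(p[0]);                                 // 9
        NativeMemory.AlignedFree(p);
    }
}
```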
@@ -11918,7 +13180,10 @@
To be added.
To be added.
- __m512d _mm512_sub_pd (__m512d a, __m512d b) VSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}
+
+ __m512d _mm512_sub_pd (__m512d a, __m512d b)
+ VSUBPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{er}
+
To be added.
To be added.
@@ -11945,7 +13210,10 @@
To be added.
To be added.
- __m512i _mm512_sub_epi32 (__m512i a, __m512i b) VPSUBD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_sub_epi32 (__m512i a, __m512i b)
+ VPSUBD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -11972,7 +13240,10 @@
To be added.
To be added.
- __m512i _mm512_sub_epi64 (__m512i a, __m512i b) VPSUBQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_sub_epi64 (__m512i a, __m512i b)
+ VPSUBQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -11999,7 +13270,10 @@
To be added.
To be added.
- __m512 _mm512_sub_ps (__m512 a, __m512 b) VSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}
+
+ __m512 _mm512_sub_ps (__m512 a, __m512 b)
+ VSUBPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{er}
+
To be added.
To be added.
@@ -12026,7 +13300,10 @@
To be added.
To be added.
- __m512i _mm512_sub_epi32 (__m512i a, __m512i b) VPSUBD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_sub_epi32 (__m512i a, __m512i b)
+ VPSUBD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12053,7 +13330,10 @@
To be added.
To be added.
- __m512i _mm512_sub_epi64 (__m512i a, __m512i b) VPSUBQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_sub_epi64 (__m512i a, __m512i b)
+ VPSUBQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
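The sub entries are straightforward element-wise subtraction; a one-liner sketch (names ours):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<double> a = Vector512.Create(10.0);
    Vector512<double> b = Vector512.Create(2.5);
    Console.WriteLine(Avx512F.Subtract(a, b).GetElement(0)); // 7.5 (VSUBPD)
}
```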
@@ -12091,7 +13371,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, byte imm) VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, byte imm)
+ VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12129,7 +13412,10 @@
To be added.
To be added.
To be added.
- __m512d _mm512_ternarylogic_pd (__m512d a, __m512d b, __m512d c, int imm) VPTERNLOGQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m512d _mm512_ternarylogic_pd (__m512d a, __m512d b, __m512d c, int imm)
+ VPTERNLOGQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12167,7 +13453,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, short imm) VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, short imm)
+ VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12205,7 +13494,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_ternarylogic_epi32 (__m512i a, __m512i b, __m512i c, int imm) VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
+ __m512i _mm512_ternarylogic_epi32 (__m512i a, __m512i b, __m512i c, int imm)
+ VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -12243,7 +13535,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_ternarylogic_epi64 (__m512i a, __m512i b, __m512i c, int imm) VPTERNLOGQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
+ __m512i _mm512_ternarylogic_epi64 (__m512i a, __m512i b, __m512i c, int imm)
+ VPTERNLOGQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
To be added.
To be added.
@@ -12281,7 +13576,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, int imm) VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, int imm)
+ VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12319,7 +13617,10 @@
To be added.
To be added.
To be added.
- __m512 _mm512_ternarylogic_ps (__m512 a, __m512 b, __m512 c, int imm) VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m512 _mm512_ternarylogic_ps (__m512 a, __m512 b, __m512 c, int imm)
+ VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12357,7 +13658,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, short imm) VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8 The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
+ __m512i _mm512_ternarylogic_si512 (__m512i a, __m512i b, __m512i c, short imm)
+ VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12395,7 +13699,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_ternarylogic_epi32 (__m512i a, __m512i b, __m512i c, int imm) VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
+ __m512i _mm512_ternarylogic_epi32 (__m512i a, __m512i b, __m512i c, int imm)
+ VPTERNLOGD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst, imm8
+
To be added.
To be added.
@@ -12433,7 +13740,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_ternarylogic_epi64 (__m512i a, __m512i b, __m512i c, int imm) VPTERNLOGQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
+ __m512i _mm512_ternarylogic_epi64 (__m512i a, __m512i b, __m512i c, int imm)
+ VPTERNLOGQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst, imm8
+
To be added.
To be added.
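VPTERNLOG's imm8 is a three-input truth table: bit `(A<<2)|(B<<1)|C` of the control gives the output for that input combination, so 0x96 (set at indices 1, 2, 4, 7) encodes A ^ B ^ C. A sketch (values and names ours):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<int> a = Vector512.Create(0b1100);
    Vector512<int> b = Vector512.Create(0b1010);
    Vector512<int> c = Vector512.Create(0b1001);
    // 0x96 = truth table for three-way XOR, done in a single instruction.
    Vector512<int> r = Avx512F.TernaryLogic(a, b, c, 0x96);
    Console.WriteLine(Convert.ToString(r.GetElement(0), 2)); // 1111
}
```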
@@ -12460,7 +13770,10 @@
To be added.
To be added.
- __m512d _mm512_unpackhi_pd (__m512d a, __m512d b) VUNPCKHPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_unpackhi_pd (__m512d a, __m512d b)
+ VUNPCKHPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -12487,7 +13800,10 @@
To be added.
To be added.
- __m512i _mm512_unpackhi_epi32 (__m512i a, __m512i b) VPUNPCKHDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_unpackhi_epi32 (__m512i a, __m512i b)
+ VPUNPCKHDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12514,7 +13830,10 @@
To be added.
To be added.
- __m512i _mm512_unpackhi_epi64 (__m512i a, __m512i b) VPUNPCKHQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_unpackhi_epi64 (__m512i a, __m512i b)
+ VPUNPCKHQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -12541,7 +13860,10 @@
To be added.
To be added.
- __m512 _mm512_unpackhi_ps (__m512 a, __m512 b) VUNPCKHPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_unpackhi_ps (__m512 a, __m512 b)
+ VUNPCKHPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12568,7 +13890,10 @@
To be added.
To be added.
- __m512i _mm512_unpackhi_epi32 (__m512i a, __m512i b) VPUNPCKHDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_unpackhi_epi32 (__m512i a, __m512i b)
+ VPUNPCKHDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12595,7 +13920,10 @@
To be added.
To be added.
- __m512i _mm512_unpackhi_epi64 (__m512i a, __m512i b) VPUNPCKHQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_unpackhi_epi64 (__m512i a, __m512i b)
+ VPUNPCKHQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -12622,7 +13950,10 @@
To be added.
To be added.
- __m512d _mm512_unpacklo_pd (__m512d a, __m512d b) VUNPCKLPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512d _mm512_unpacklo_pd (__m512d a, __m512d b)
+ VUNPCKLPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -12649,7 +13980,10 @@
To be added.
To be added.
- __m512i _mm512_unpacklo_epi32 (__m512i a, __m512i b) VPUNPCKLDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_unpacklo_epi32 (__m512i a, __m512i b)
+ VPUNPCKLDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12676,7 +14010,10 @@
To be added.
To be added.
- __m512i _mm512_unpacklo_epi64 (__m512i a, __m512i b) VPUNPCKLQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_unpacklo_epi64 (__m512i a, __m512i b)
+ VPUNPCKLQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -12703,7 +14040,10 @@
To be added.
To be added.
- __m512 _mm512_unpacklo_ps (__m512 a, __m512 b) VUNPCKLPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512 _mm512_unpacklo_ps (__m512 a, __m512 b)
+ VUNPCKLPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12730,7 +14070,10 @@
To be added.
To be added.
- __m512i _mm512_unpacklo_epi32 (__m512i a, __m512i b) VPUNPCKLDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_unpacklo_epi32 (__m512i a, __m512i b)
+ VPUNPCKLDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12757,7 +14100,10 @@
To be added.
To be added.
- __m512i _mm512_unpacklo_epi64 (__m512i a, __m512i b) VPUNPCKLQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_unpacklo_epi64 (__m512i a, __m512i b)
+ VPUNPCKLQDQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
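The unpack entries interleave elements from the high or low half of each 128-bit lane. A sketch of the low form (names ours):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<int> a = Vector512.Create(0);
    Vector512<int> b = Vector512.Create(1);
    // VPUNPCKLDQ: within every lane the result is a0, b0, a1, b1.
    Vector512<int> r = Avx512F.UnpackLow(a, b);
    Console.WriteLine($"{r.GetElement(0)} {r.GetElement(1)}"); // 0 1
}
```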
@@ -12784,7 +14130,10 @@
To be added.
To be added.
- __m512i _mm512_xor_si512 (__m512i a, __m512i b) VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_xor_si512 (__m512i a, __m512i b)
+ VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12811,7 +14160,10 @@
To be added.
To be added.
- __m512i _mm512_xor_si512 (__m512i a, __m512i b) VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_xor_si512 (__m512i a, __m512i b)
+ VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12838,7 +14190,10 @@
To be added.
To be added.
- __m512i _mm512_xor_epi32 (__m512i a, __m512i b) VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_xor_epi32 (__m512i a, __m512i b)
+ VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12865,7 +14220,10 @@
To be added.
To be added.
- __m512i _mm512_xor_epi64 (__m512i a, __m512i b) VPXORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_xor_epi64 (__m512i a, __m512i b)
+ VPXORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -12892,7 +14250,10 @@
To be added.
To be added.
- __m512i _mm512_xor_si512 (__m512i a, __m512i b) VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_xor_si512 (__m512i a, __m512i b)
+ VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12919,7 +14280,10 @@
To be added.
To be added.
- __m512i _mm512_xor_si512 (__m512i a, __m512i b) VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_xor_si512 (__m512i a, __m512i b)
+ VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12946,7 +14310,10 @@
To be added.
To be added.
- __m512i _mm512_xor_epi32 (__m512i a, __m512i b) VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
+ __m512i _mm512_xor_epi32 (__m512i a, __m512i b)
+ VPXORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -12973,7 +14340,10 @@
To be added.
To be added.
- __m512i _mm512_xor_epi64 (__m512i a, __m512i b) VPXORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
+ __m512i _mm512_xor_epi64 (__m512i a, __m512i b)
+ VPXORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
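The xor entries are plain element-wise exclusive-or; a small sketch toggling a known-set flag bit (values ours):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512F.IsSupported)
{
    Vector512<int> flags = Vector512.Create(0b1011);
    Vector512<int> bit   = Vector512.Create(0b0010);
    Console.WriteLine(Avx512F.Xor(flags, bit).GetElement(0)); // 9 (0b1001, via VPXORD)
}
```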
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+VL.xml b/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+VL.xml
index d6190b5d520..9abc1baba31 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+VL.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+VL.xml
@@ -61,7 +61,10 @@
To be added.
To be added.
- __m128i _mm_permutevar64x8_epi8 (__m128i a, __m128i b) VPERMB xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_permutevar64x8_epi8 (__m128i a, __m128i b)
+ VPERMB xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -88,7 +91,10 @@
To be added.
To be added.
- __m128i _mm_permutevar64x8_epi8 (__m128i a, __m128i b) VPERMB xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_permutevar64x8_epi8 (__m128i a, __m128i b)
+ VPERMB xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -117,7 +123,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_permutex2var_epi8 (__m128i a, __m128i idx, __m128i b) VPERMI2B xmm1 {k1}{z}, xmm2, xmm3/m128 VPERMT2B xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_permutex2var_epi8 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2B xmm1 {k1}{z}, xmm2, xmm3/m128
+ VPERMT2B xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -146,7 +155,10 @@
To be added.
To be added.
To be added.
- __m128i _mm_permutex2var_epi8 (__m128i a, __m128i idx, __m128i b) VPERMI2B xmm1 {k1}{z}, xmm2, xmm3/m128 VPERMT2B xmm1 {k1}{z}, xmm2, xmm3/m128
+
+ __m128i _mm_permutex2var_epi8 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2B xmm1 {k1}{z}, xmm2, xmm3/m128
+ VPERMT2B xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -173,7 +185,10 @@
To be added.
To be added.
- __m256i _mm256_permutevar64x8_epi8 (__m256i a, __m256i b) VPERMB ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_permutevar64x8_epi8 (__m256i a, __m256i b)
+ VPERMB ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -200,7 +215,10 @@
To be added.
To be added.
- __m256i _mm256_permutevar64x8_epi8 (__m256i a, __m256i b) VPERMB ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_permutevar64x8_epi8 (__m256i a, __m256i b)
+ VPERMB ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -229,7 +247,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_permutex2var_epi8 (__m256i a, __m256i idx, __m256i b) VPERMI2B ymm1 {k1}{z}, ymm2, ymm3/m256 VPERMT2B ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_permutex2var_epi8 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2B ymm1 {k1}{z}, ymm2, ymm3/m256
+ VPERMT2B ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -258,7 +279,10 @@
To be added.
To be added.
To be added.
- __m256i _mm256_permutex2var_epi8 (__m256i a, __m256i idx, __m256i b) VPERMI2B ymm1 {k1}{z}, ymm2, ymm3/m256 VPERMT2B ymm1 {k1}{z}, ymm2, ymm3/m256
+
+ __m256i _mm256_permutex2var_epi8 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2B ymm1 {k1}{z}, ymm2, ymm3/m256
+ VPERMT2B ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
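VPERMB selects each output byte by index from a source vector, with full cross-lane reach. A hedged sketch of the 128-bit form, assuming the managed name `Avx512Vbmi.VL.PermuteVar16x8` (index values and names are ours):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512Vbmi.VL.IsSupported)
{
    Vector128<byte> data = Vector128.Create((byte)0, 1, 2, 3, 4, 5, 6, 7,
                                            8, 9, 10, 11, 12, 13, 14, 15);
    Vector128<byte> rev = Vector128.Create((byte)15, 14, 13, 12, 11, 10, 9, 8,
                                           7, 6, 5, 4, 3, 2, 1, 0);
    // r[i] = data[rev[i]]: a byte-granular reversal via VPERMB.
    Vector128<byte> r = Avx512Vbmi.VL.PermuteVar16x8(data, rev);
    Console.WriteLine(r.GetElement(0)); // 15
}
```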
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi.xml b/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi.xml
index 934f07758ff..ca715aa392e 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi.xml
@@ -67,7 +67,10 @@
To be added.
To be added.
- __m512i _mm512_permutevar64x8_epi8 (__m512i a, __m512i b) VPERMB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_permutevar64x8_epi8 (__m512i a, __m512i b)
+ VPERMB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -94,7 +97,10 @@
To be added.
To be added.
- __m512i _mm512_permutevar64x8_epi8 (__m512i a, __m512i b) VPERMB zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_permutevar64x8_epi8 (__m512i a, __m512i b)
+ VPERMB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -123,7 +129,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_permutex2var_epi8 (__m512i a, __m512i idx, __m512i b) VPERMI2B zmm1 {k1}{z}, zmm2, zmm3/m512 VPERMT2B zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_permutex2var_epi8 (__m512i a, __m512i idx, __m512i b)
+ VPERMI2B zmm1 {k1}{z}, zmm2, zmm3/m512
+ VPERMT2B zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -152,7 +161,10 @@
To be added.
To be added.
To be added.
- __m512i _mm512_permutex2var_epi8 (__m512i a, __m512i idx, __m512i b) VPERMI2B zmm1 {k1}{z}, zmm2, zmm3/m512 VPERMT2B zmm1 {k1}{z}, zmm2, zmm3/m512
+
+ __m512i _mm512_permutex2var_epi8 (__m512i a, __m512i idx, __m512i b)
+ VPERMI2B zmm1 {k1}{z}, zmm2, zmm3/m512
+ VPERMT2B zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
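The VPERMI2B/VPERMT2B entries index into a 128-byte table formed by two vectors; index bit 6 selects the second table. A hedged sketch, assuming the managed name `Avx512Vbmi.PermuteVar64x8x2` (values and names ours):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx512Vbmi.IsSupported)
{
    Vector512<byte> lo  = Vector512.Create((byte)0xAA); // table bytes 0..63
    Vector512<byte> hi  = Vector512.Create((byte)0xBB); // table bytes 64..127
    Vector512<byte> idx = Vector512.Create((byte)64);   // indices 64..127 address 'hi'
    Vector512<byte> r = Avx512Vbmi.PermuteVar64x8x2(lo, idx, hi);
    Console.WriteLine(r.GetElement(0).ToString("X2")); // BB
}
```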
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse3.xml b/xml/System.Runtime.Intrinsics.X86/Sse3.xml
index 0dd8974b1e5..58b2c16661c 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse3.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse3.xml
@@ -321,7 +321,9 @@
To be added.
- __m128i _mm_lddqu_si128 (__m128i const* mem_addr) LDDQU xmm1, m128 VLDDQU xmm1, m128
+
+ __m128i _mm_lddqu_si128 (__m128i const* mem_addr)
+ LDDQU xmm1, m128
+ VLDDQU xmm1, m128
+
To be added.
To be added.
@@ -350,7 +352,9 @@
To be added.
- __m128i _mm_lddqu_si128 (__m128i const* mem_addr) LDDQU xmm1, m128 VLDDQU xmm1, m128
+
+ __m128i _mm_lddqu_si128 (__m128i const* mem_addr)
+ LDDQU xmm1, m128
+ VLDDQU xmm1, m128
+
To be added.
To be added.
@@ -379,7 +383,9 @@
To be added.
- __m128i _mm_lddqu_si128 (__m128i const* mem_addr) LDDQU xmm1, m128 VLDDQU xmm1, m128
+
+ __m128i _mm_lddqu_si128 (__m128i const* mem_addr)
+ LDDQU xmm1, m128
+ VLDDQU xmm1, m128
+
To be added.
To be added.
@@ -408,7 +414,9 @@
To be added.
- __m128i _mm_lddqu_si128 (__m128i const* mem_addr) LDDQU xmm1, m128 VLDDQU xmm1, m128
+
+ __m128i _mm_lddqu_si128 (__m128i const* mem_addr)
+ LDDQU xmm1, m128
+ VLDDQU xmm1, m128
+
To be added.
To be added.
@@ -469,7 +477,9 @@
To be added.
- __m128i _mm_lddqu_si128 (__m128i const* mem_addr) LDDQU xmm1, m128 VLDDQU xmm1, m128
+
+ __m128i _mm_lddqu_si128 (__m128i const* mem_addr)
+ LDDQU xmm1, m128
+ VLDDQU xmm1, m128
+
To be added.
To be added.
@@ -498,7 +508,9 @@
To be added.
- __m128i _mm_lddqu_si128 (__m128i const* mem_addr) LDDQU xmm1, m128 VLDDQU xmm1, m128
+
+ __m128i _mm_lddqu_si128 (__m128i const* mem_addr)
+ LDDQU xmm1, m128
+ VLDDQU xmm1, m128
+
To be added.
To be added.
@@ -527,7 +539,9 @@
To be added.
- __m128i _mm_lddqu_si128 (__m128i const* mem_addr) LDDQU xmm1, m128 VLDDQU xmm1, m128
+
+ __m128i _mm_lddqu_si128 (__m128i const* mem_addr)
+ LDDQU xmm1, m128
+ VLDDQU xmm1, m128
+
To be added.
To be added.
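LDDQU loads 16 unaligned bytes and was historically friendlier than MOVDQU for loads straddling a cache line. A sketch (names ours; unsafe blocks assumed enabled):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Sse3.IsSupported)
{
    unsafe
    {
        byte* buf = stackalloc byte[32];
        for (int i = 0; i < 32; i++) buf[i] = (byte)i;
        Vector128<byte> v = Sse3.LoadDquVector128(buf + 3); // deliberately misaligned load
        Console.WriteLine(v.GetElement(0)); // 3
    }
}
```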
diff --git a/xml/System.Runtime.Intrinsics.X86/X86Base+X64.xml b/xml/System.Runtime.Intrinsics.X86/X86Base+X64.xml
index 37f22e070cd..e547fdbacbe 100644
--- a/xml/System.Runtime.Intrinsics.X86/X86Base+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/X86Base+X64.xml
@@ -57,7 +57,9 @@
To be added.
To be added.
To be added.
- __int64 _div128(__int64 highdividend, __int64 lowdividend, __int64 divisor, __int64* remainder) DIV reg/m64
+
+ __int64 _div128(__int64 highdividend, __int64 lowdividend, __int64 divisor, __int64* remainder)
+ DIV reg/m64
+
To be added.
To be added.
@@ -98,7 +100,9 @@
To be added.
To be added.
To be added.
- unsigned __int64 _udiv128(unsigned __int64 highdividend, unsigned __int64 lowdividend, unsigned __int64 divisor, unsigned __int64* remainder) DIV reg/m64
+
+ unsigned __int64 _udiv128(unsigned __int64 highdividend, unsigned __int64 lowdividend, unsigned __int64 divisor, unsigned __int64* remainder)
+ DIV reg/m64
+
To be added.
To be added.
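These entries describe the 128-by-64 division DIV performs; `X86Base.X64.DivRem` is the managed surface, and it faults (like the instruction) if the quotient overflows 64 bits. A sketch (values ours):

```csharp
using System;
using System.Runtime.Intrinsics.X86;

if (X86Base.X64.IsSupported)
{
    // Dividend = (upper:lower) = 2^64 + 5, divisor = 2 -> q = 2^63 + 2, r = 1.
    (ulong q, ulong r) = X86Base.X64.DivRem(5UL, 1UL, 2UL);
    Console.WriteLine($"{q} {r}"); // 9223372036854775810 1
}
```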