1// Simd x86 specific implementations -*- C++ -*-
2
3// Copyright (C) 2020-2021 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_X86_H_
26#define _GLIBCXX_EXPERIMENTAL_SIMD_X86_H_
27
28#if __cplusplus >= 201703L
29
30#if !_GLIBCXX_SIMD_X86INTRIN
31#error \
32 "simd_x86.h may only be included when MMX or SSE on x86(_64) are available"
33#endif
34
35_GLIBCXX_SIMD_BEGIN_NAMESPACE
36
37// __to_masktype {{{
38// Given <T, N> return <__int_for_sizeof_t<T>, N>. For _SimdWrapper and
39// __vector_type_t.
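// E.g. a __vector_type_t<float, 4> (or a _SimdWrapper<float, 4>) is
// reinterpreted as __vector_type_t<int, 4> with the same bit pattern, which
// is the element type vector masks are stored in.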
40template <typename _Tp, size_t _Np>
41 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<__int_for_sizeof_t<_Tp>, _Np>
42 __to_masktype(_SimdWrapper<_Tp, _Np> __x)
43 {
44 return reinterpret_cast<__vector_type_t<__int_for_sizeof_t<_Tp>, _Np>>(
45 __x._M_data);
46 }
47
48template <typename _TV,
49 typename _TVT
50 = enable_if_t<__is_vector_type_v<_TV>, _VectorTraits<_TV>>,
51 typename _Up = __int_for_sizeof_t<typename _TVT::value_type>>
52 _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Up, _TVT::_S_full_size>
53 __to_masktype(_TV __x)
54 { return reinterpret_cast<__vector_type_t<_Up, _TVT::_S_full_size>>(__x); }
55
56// }}}
57// __interleave128_lo {{{
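// Interleaves the low elements of each 128-bit lane of __av and __bv (a
// per-lane unpacklo). E.g. 4 x float inputs {a0, a1, a2, a3} and
// {b0, b1, b2, b3} yield {a0, b0, a1, b1}; 8 x float inputs (two lanes)
// yield {a0, b0, a1, b1, a4, b4, a5, b5}.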
58template <typename _Ap, typename _Bp, typename _Tp = common_type_t<_Ap, _Bp>,
59 typename _Trait = _VectorTraits<_Tp>>
60 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
61 __interleave128_lo(const _Ap& __av, const _Bp& __bv)
62 {
63 const _Tp __a(__av);
64 const _Tp __b(__bv);
65 if constexpr (sizeof(_Tp) == 16 && _Trait::_S_full_size == 2)
66 return _Tp{__a[0], __b[0]};
67 else if constexpr (sizeof(_Tp) == 16 && _Trait::_S_full_size == 4)
68 return _Tp{__a[0], __b[0], __a[1], __b[1]};
69 else if constexpr (sizeof(_Tp) == 16 && _Trait::_S_full_size == 8)
70 return _Tp{__a[0], __b[0], __a[1], __b[1],
71 __a[2], __b[2], __a[3], __b[3]};
72 else if constexpr (sizeof(_Tp) == 16 && _Trait::_S_full_size == 16)
73 return _Tp{__a[0], __b[0], __a[1], __b[1], __a[2], __b[2],
74 __a[3], __b[3], __a[4], __b[4], __a[5], __b[5],
75 __a[6], __b[6], __a[7], __b[7]};
76 else if constexpr (sizeof(_Tp) == 32 && _Trait::_S_full_size == 4)
77 return _Tp{__a[0], __b[0], __a[2], __b[2]};
78 else if constexpr (sizeof(_Tp) == 32 && _Trait::_S_full_size == 8)
79 return _Tp{__a[0], __b[0], __a[1], __b[1],
80 __a[4], __b[4], __a[5], __b[5]};
81 else if constexpr (sizeof(_Tp) == 32 && _Trait::_S_full_size == 16)
82 return _Tp{__a[0], __b[0], __a[1], __b[1], __a[2], __b[2],
83 __a[3], __b[3], __a[8], __b[8], __a[9], __b[9],
84 __a[10], __b[10], __a[11], __b[11]};
85 else if constexpr (sizeof(_Tp) == 32 && _Trait::_S_full_size == 32)
86 return _Tp{__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3],
87 __b[3], __a[4], __b[4], __a[5], __b[5], __a[6], __b[6],
88 __a[7], __b[7], __a[16], __b[16], __a[17], __b[17], __a[18],
89 __b[18], __a[19], __b[19], __a[20], __b[20], __a[21], __b[21],
90 __a[22], __b[22], __a[23], __b[23]};
91 else if constexpr (sizeof(_Tp) == 64 && _Trait::_S_full_size == 8)
92 return _Tp{__a[0], __b[0], __a[2], __b[2],
93 __a[4], __b[4], __a[6], __b[6]};
94 else if constexpr (sizeof(_Tp) == 64 && _Trait::_S_full_size == 16)
95 return _Tp{__a[0], __b[0], __a[1], __b[1], __a[4], __b[4],
96 __a[5], __b[5], __a[8], __b[8], __a[9], __b[9],
97 __a[12], __b[12], __a[13], __b[13]};
98 else if constexpr (sizeof(_Tp) == 64 && _Trait::_S_full_size == 32)
99 return _Tp{__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3],
100 __b[3], __a[8], __b[8], __a[9], __b[9], __a[10], __b[10],
101 __a[11], __b[11], __a[16], __b[16], __a[17], __b[17], __a[18],
102 __b[18], __a[19], __b[19], __a[24], __b[24], __a[25], __b[25],
103 __a[26], __b[26], __a[27], __b[27]};
104 else if constexpr (sizeof(_Tp) == 64 && _Trait::_S_full_size == 64)
105 return _Tp{__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3],
106 __b[3], __a[4], __b[4], __a[5], __b[5], __a[6], __b[6],
107 __a[7], __b[7], __a[16], __b[16], __a[17], __b[17], __a[18],
108 __b[18], __a[19], __b[19], __a[20], __b[20], __a[21], __b[21],
109 __a[22], __b[22], __a[23], __b[23], __a[32], __b[32], __a[33],
110 __b[33], __a[34], __b[34], __a[35], __b[35], __a[36], __b[36],
111 __a[37], __b[37], __a[38], __b[38], __a[39], __b[39], __a[48],
112 __b[48], __a[49], __b[49], __a[50], __b[50], __a[51], __b[51],
113 __a[52], __b[52], __a[53], __b[53], __a[54], __b[54], __a[55],
114 __b[55]};
115 else
116 __assert_unreachable<_Tp>();
117 }
118
119// }}}
120// __is_zero{{{
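// Returns true iff all bits of __a are zero. Uses the ptest/vtest intrinsics
// where available at runtime; in constant evaluation (and at runtime without
// SSE4.1) it falls back to comparing via the integer representation, ORing
// the vector halves together for vectors wider than 8 bytes.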
121template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
122 _GLIBCXX_SIMD_INTRINSIC constexpr bool
123 __is_zero(_Tp __a)
124 {
125 if (!__builtin_is_constant_evaluated())
126 {
127 if constexpr (__have_avx)
128 {
129 if constexpr (_TVT::template _S_is<float, 8>)
130 return _mm256_testz_ps(__a, __a);
131 else if constexpr (_TVT::template _S_is<double, 4>)
132 return _mm256_testz_pd(__a, __a);
133 else if constexpr (sizeof(_Tp) == 32)
134 return _mm256_testz_si256(__to_intrin(__a), __to_intrin(__a));
135 else if constexpr (_TVT::template _S_is<float>)
136 return _mm_testz_ps(__to_intrin(__a), __to_intrin(__a));
137 else if constexpr (_TVT::template _S_is<double, 2>)
138 return _mm_testz_pd(__a, __a);
139 else
140 return _mm_testz_si128(__to_intrin(__a), __to_intrin(__a));
141 }
142 else if constexpr (__have_sse4_1)
143 return _mm_testz_si128(__intrin_bitcast<__m128i>(__a),
144 __intrin_bitcast<__m128i>(__a));
145 }
146 else if constexpr (sizeof(_Tp) <= 8)
147 return reinterpret_cast<__int_for_sizeof_t<_Tp>>(__a) == 0;
    // generic fallback: reached at runtime without SSE4.1 and in constant
    // evaluation for vectors wider than 8 bytes
150 const auto __b = __vector_bitcast<_LLong>(__a);
151 if constexpr (sizeof(__b) == 16)
152 return (__b[0] | __b[1]) == 0;
153 else if constexpr (sizeof(__b) == 32)
154 return __is_zero(__lo128(__b) | __hi128(__b));
155 else if constexpr (sizeof(__b) == 64)
156 return __is_zero(__lo256(__b) | __hi256(__b));
157 else
158 __assert_unreachable<_Tp>();
 160 }
161
162// }}}
163// __movemask{{{
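// Returns the movmskp[sd]/pmovmskb bitmask of __a: one bit per element sign
// bit for float/double vectors, one bit per byte otherwise.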
164template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
165 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST int
166 __movemask(_Tp __a)
167 {
168 if constexpr (sizeof(_Tp) == 32)
169 {
170 if constexpr (_TVT::template _S_is<float>)
171 return _mm256_movemask_ps(__to_intrin(__a));
172 else if constexpr (_TVT::template _S_is<double>)
173 return _mm256_movemask_pd(__to_intrin(__a));
174 else
175 return _mm256_movemask_epi8(__to_intrin(__a));
176 }
177 else if constexpr (_TVT::template _S_is<float>)
178 return _mm_movemask_ps(__to_intrin(__a));
179 else if constexpr (_TVT::template _S_is<double>)
180 return _mm_movemask_pd(__to_intrin(__a));
181 else
182 return _mm_movemask_epi8(__to_intrin(__a));
183 }
184
185// }}}
186// __testz{{{
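// Returns a nonzero value iff (__a & __b) is all zeros, i.e. the ZF result
// of ptest/vtest. Without SSE4.1 it is emulated via __movemask, and in
// constant evaluation via __is_zero.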
187template <typename _TI, typename _TVT = _VectorTraits<_TI>>
188 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST constexpr int
189 __testz(_TI __a, _TI __b)
190 {
191 static_assert(is_same_v<_TI, __intrinsic_type_t<typename _TVT::value_type,
192 _TVT::_S_full_size>>);
193 if (!__builtin_is_constant_evaluated())
194 {
195 if constexpr (sizeof(_TI) == 32)
196 {
197 if constexpr (_TVT::template _S_is<float>)
198 return _mm256_testz_ps(__to_intrin(__a), __to_intrin(__b));
199 else if constexpr (_TVT::template _S_is<double>)
200 return _mm256_testz_pd(__to_intrin(__a), __to_intrin(__b));
201 else
202 return _mm256_testz_si256(__to_intrin(__a), __to_intrin(__b));
203 }
204 else if constexpr (_TVT::template _S_is<float> && __have_avx)
205 return _mm_testz_ps(__to_intrin(__a), __to_intrin(__b));
206 else if constexpr (_TVT::template _S_is<double> && __have_avx)
207 return _mm_testz_pd(__to_intrin(__a), __to_intrin(__b));
208 else if constexpr (__have_sse4_1)
209 return _mm_testz_si128(__intrin_bitcast<__m128i>(__to_intrin(__a)),
210 __intrin_bitcast<__m128i>(__to_intrin(__b)));
211 else
212 return __movemask(0 == __and(__a, __b)) != 0;
213 }
214 else
215 return __is_zero(__and(__a, __b));
216 }
217
218// }}}
219// __testc{{{
220// requires SSE4.1 or above
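// Returns a nonzero value iff every bit set in __b is also set in __a, i.e.
// (~__a & __b) == 0, the CF result of ptest/vtest.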
221template <typename _TI, typename _TVT = _VectorTraits<_TI>>
222 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST constexpr int
223 __testc(_TI __a, _TI __b)
224 {
225 static_assert(is_same_v<_TI, __intrinsic_type_t<typename _TVT::value_type,
226 _TVT::_S_full_size>>);
227 if (__builtin_is_constant_evaluated())
228 return __is_zero(__andnot(__a, __b));
229
230 if constexpr (sizeof(_TI) == 32)
231 {
232 if constexpr (_TVT::template _S_is<float>)
233 return _mm256_testc_ps(__a, __b);
234 else if constexpr (_TVT::template _S_is<double>)
235 return _mm256_testc_pd(__a, __b);
236 else
237 return _mm256_testc_si256(__to_intrin(__a), __to_intrin(__b));
238 }
239 else if constexpr (_TVT::template _S_is<float> && __have_avx)
240 return _mm_testc_ps(__to_intrin(__a), __to_intrin(__b));
241 else if constexpr (_TVT::template _S_is<double> && __have_avx)
242 return _mm_testc_pd(__to_intrin(__a), __to_intrin(__b));
243 else
244 {
245 static_assert(is_same_v<_TI, _TI> && __have_sse4_1);
246 return _mm_testc_si128(__intrin_bitcast<__m128i>(__to_intrin(__a)),
247 __intrin_bitcast<__m128i>(__to_intrin(__b)));
248 }
249 }
250
251// }}}
252// __testnzc{{{
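// Returns a nonzero value iff (__a & __b) != 0 and (~__a & __b) != 0, i.e.
// neither the ZF nor the CF result of ptest/vtest is set.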
253template <typename _TI, typename _TVT = _VectorTraits<_TI>>
254 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST constexpr int
255 __testnzc(_TI __a, _TI __b)
256 {
257 static_assert(is_same_v<_TI, __intrinsic_type_t<typename _TVT::value_type,
258 _TVT::_S_full_size>>);
259 if (!__builtin_is_constant_evaluated())
260 {
261 if constexpr (sizeof(_TI) == 32)
262 {
263 if constexpr (_TVT::template _S_is<float>)
264 return _mm256_testnzc_ps(__a, __b);
265 else if constexpr (_TVT::template _S_is<double>)
266 return _mm256_testnzc_pd(__a, __b);
267 else
268 return _mm256_testnzc_si256(__to_intrin(__a), __to_intrin(__b));
269 }
270 else if constexpr (_TVT::template _S_is<float> && __have_avx)
271 return _mm_testnzc_ps(__to_intrin(__a), __to_intrin(__b));
272 else if constexpr (_TVT::template _S_is<double> && __have_avx)
273 return _mm_testnzc_pd(__to_intrin(__a), __to_intrin(__b));
274 else if constexpr (__have_sse4_1)
275 return _mm_testnzc_si128(__intrin_bitcast<__m128i>(__to_intrin(__a)),
276 __intrin_bitcast<__m128i>(__to_intrin(__b)));
277 else
278 return __movemask(0 == __and(__a, __b)) == 0
279 && __movemask(0 == __andnot(__a, __b)) == 0;
280 }
281 else
282 return !(__is_zero(__and(__a, __b)) || __is_zero(__andnot(__a, __b)));
283 }
284
285// }}}
286// __xzyw{{{
287// Shuffles the complete vector, swapping the inner two quarters. Often
288// useful on AVX to fix up a shuffle result.
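// E.g. an 8 x float AVX vector {0, 1, 2, 3, 4, 5, 6, 7} becomes
// {0, 1, 4, 5, 2, 3, 6, 7}.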
289template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
290 _GLIBCXX_SIMD_INTRINSIC _Tp
291 __xzyw(_Tp __a)
292 {
293 if constexpr (sizeof(_Tp) == 16)
294 {
295 const auto __x = __vector_bitcast<conditional_t<
296 is_floating_point_v<typename _TVT::value_type>, float, int>>(__a);
297 return reinterpret_cast<_Tp>(
298 decltype(__x){__x[0], __x[2], __x[1], __x[3]});
299 }
300 else if constexpr (sizeof(_Tp) == 32)
301 {
302 const auto __x = __vector_bitcast<conditional_t<
303 is_floating_point_v<typename _TVT::value_type>, double, _LLong>>(__a);
304 return reinterpret_cast<_Tp>(
305 decltype(__x){__x[0], __x[2], __x[1], __x[3]});
306 }
307 else if constexpr (sizeof(_Tp) == 64)
308 {
309 const auto __x = __vector_bitcast<conditional_t<
310 is_floating_point_v<typename _TVT::value_type>, double, _LLong>>(__a);
311 return reinterpret_cast<_Tp>(decltype(__x){__x[0], __x[1], __x[4],
312 __x[5], __x[2], __x[3],
313 __x[6], __x[7]});
314 }
315 else
316 __assert_unreachable<_Tp>();
317 }
318
319// }}}
320// __maskload_epi32{{{
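// The __maskload_* helpers below dispatch to the 128-bit or 256-bit AVX/AVX2
// masked-load intrinsic depending on the width of the mask vector __k;
// elements whose mask sign bit is clear are loaded as zero.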
321template <typename _Tp>
322 _GLIBCXX_SIMD_INTRINSIC auto
323 __maskload_epi32(const int* __ptr, _Tp __k)
324 {
325 if constexpr (sizeof(__k) == 16)
326 return _mm_maskload_epi32(__ptr, __k);
327 else
328 return _mm256_maskload_epi32(__ptr, __k);
329 }
330
331// }}}
332// __maskload_epi64{{{
333template <typename _Tp>
334 _GLIBCXX_SIMD_INTRINSIC auto
335 __maskload_epi64(const _LLong* __ptr, _Tp __k)
336 {
337 if constexpr (sizeof(__k) == 16)
338 return _mm_maskload_epi64(__ptr, __k);
339 else
340 return _mm256_maskload_epi64(__ptr, __k);
341 }
342
343// }}}
344// __maskload_ps{{{
345template <typename _Tp>
346 _GLIBCXX_SIMD_INTRINSIC auto
347 __maskload_ps(const float* __ptr, _Tp __k)
348 {
349 if constexpr (sizeof(__k) == 16)
350 return _mm_maskload_ps(__ptr, __k);
351 else
352 return _mm256_maskload_ps(__ptr, __k);
353 }
354
355// }}}
356// __maskload_pd{{{
357template <typename _Tp>
358 _GLIBCXX_SIMD_INTRINSIC auto
359 __maskload_pd(const double* __ptr, _Tp __k)
360 {
361 if constexpr (sizeof(__k) == 16)
362 return _mm_maskload_pd(__ptr, __k);
363 else
364 return _mm256_maskload_pd(__ptr, __k);
365 }
366
367// }}}
368
369#ifdef _GLIBCXX_SIMD_WORKAROUND_PR85048
370#include "simd_x86_conversions.h"
371#endif
372
373// ISA & type detection {{{
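// The following predicates identify the native single/double-precision
// vectors of each ISA level by element type and register width. E.g.
// __is_sse_ps<float, 4>() is true whenever SSE is available, since
// __intrinsic_type_t<float, 4> is the 16-byte __m128.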
374template <typename _Tp, size_t _Np>
375 constexpr bool
376 __is_sse_ps()
377 {
378 return __have_sse
379 && is_same_v<_Tp,
380 float> && sizeof(__intrinsic_type_t<_Tp, _Np>) == 16;
381 }
382
383template <typename _Tp, size_t _Np>
384 constexpr bool
385 __is_sse_pd()
386 {
387 return __have_sse2
388 && is_same_v<_Tp,
389 double> && sizeof(__intrinsic_type_t<_Tp, _Np>) == 16;
390 }
391
392template <typename _Tp, size_t _Np>
393 constexpr bool
394 __is_avx_ps()
395 {
396 return __have_avx
397 && is_same_v<_Tp,
398 float> && sizeof(__intrinsic_type_t<_Tp, _Np>) == 32;
399 }
400
401template <typename _Tp, size_t _Np>
402 constexpr bool
403 __is_avx_pd()
404 {
405 return __have_avx
406 && is_same_v<_Tp,
407 double> && sizeof(__intrinsic_type_t<_Tp, _Np>) == 32;
408 }
409
410template <typename _Tp, size_t _Np>
411 constexpr bool
412 __is_avx512_ps()
413 {
414 return __have_avx512f
415 && is_same_v<_Tp,
416 float> && sizeof(__intrinsic_type_t<_Tp, _Np>) == 64;
417 }
418
419template <typename _Tp, size_t _Np>
420 constexpr bool
421 __is_avx512_pd()
422 {
423 return __have_avx512f
424 && is_same_v<_Tp,
425 double> && sizeof(__intrinsic_type_t<_Tp, _Np>) == 64;
426 }
427
428// }}}
429struct _MaskImplX86Mixin;
430
431// _CommonImplX86 {{{
432struct _CommonImplX86 : _CommonImplBuiltin
433{
434#ifdef _GLIBCXX_SIMD_WORKAROUND_PR85048
435 // _S_converts_via_decomposition {{{
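  // Returns true when the _From -> _To element conversion has no
  // sufficiently direct instruction sequence on the enabled ISA and is
  // therefore better done by decomposing into scalar conversions. E.g.
  // long long -> signed char into a 16-byte result decomposes unless
  // AVX512F is available.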
436 template <typename _From, typename _To, size_t _ToSize>
437 static constexpr bool _S_converts_via_decomposition()
438 {
439 if constexpr (is_integral_v<
440 _From> && is_integral_v<_To> && sizeof(_From) == 8
441 && _ToSize == 16)
442 return (sizeof(_To) == 2 && !__have_ssse3)
443 || (sizeof(_To) == 1 && !__have_avx512f);
444 else if constexpr (is_floating_point_v<_From> && is_integral_v<_To>)
445 return ((sizeof(_From) == 4 || sizeof(_From) == 8) && sizeof(_To) == 8
446 && !__have_avx512dq)
447 || (sizeof(_From) == 8 && sizeof(_To) == 4 && !__have_sse4_1
448 && _ToSize == 16);
449 else if constexpr (
450 is_integral_v<_From> && is_floating_point_v<_To> && sizeof(_From) == 8
451 && !__have_avx512dq)
452 return (sizeof(_To) == 4 && _ToSize == 16)
453 || (sizeof(_To) == 8 && _ToSize < 64);
454 else
455 return false;
456 }
457
458 template <typename _From, typename _To, size_t _ToSize>
459 static inline constexpr bool __converts_via_decomposition_v
460 = _S_converts_via_decomposition<_From, _To, _ToSize>();
461
462 // }}}
463#endif
464 // _S_store {{{
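  // With AVX512BW+VL, stores whose byte size is not a power of two are
  // written with a single masked store covering exactly _Np * sizeof(_Tp)
  // bytes (e.g. _Bytes == 7 uses _mm_mask_storeu_epi8 with mask 0x7f);
  // all other cases defer to the base class.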
465 using _CommonImplBuiltin::_S_store;
466
467 template <typename _Tp, size_t _Np>
468 _GLIBCXX_SIMD_INTRINSIC static void _S_store(_SimdWrapper<_Tp, _Np> __x,
469 void* __addr)
470 {
471 constexpr size_t _Bytes = _Np * sizeof(_Tp);
472
473 if constexpr ((_Bytes & (_Bytes - 1)) != 0 && __have_avx512bw_vl)
474 {
475 const auto __v = __to_intrin(__x);
476
477 if constexpr (_Bytes & 1)
478 {
479 if constexpr (_Bytes < 16)
480 _mm_mask_storeu_epi8(__addr, 0xffffu >> (16 - _Bytes),
481 __intrin_bitcast<__m128i>(__v));
482 else if constexpr (_Bytes < 32)
483 _mm256_mask_storeu_epi8(__addr, 0xffffffffu >> (32 - _Bytes),
484 __intrin_bitcast<__m256i>(__v));
485 else
486 _mm512_mask_storeu_epi8(__addr,
487 0xffffffffffffffffull >> (64 - _Bytes),
488 __intrin_bitcast<__m512i>(__v));
489 }
490 else if constexpr (_Bytes & 2)
491 {
492 if constexpr (_Bytes < 16)
493 _mm_mask_storeu_epi16(__addr, 0xffu >> (8 - _Bytes / 2),
494 __intrin_bitcast<__m128i>(__v));
495 else if constexpr (_Bytes < 32)
496 _mm256_mask_storeu_epi16(__addr, 0xffffu >> (16 - _Bytes / 2),
497 __intrin_bitcast<__m256i>(__v));
498 else
499 _mm512_mask_storeu_epi16(__addr,
500 0xffffffffull >> (32 - _Bytes / 2),
501 __intrin_bitcast<__m512i>(__v));
502 }
503 else if constexpr (_Bytes & 4)
504 {
505 if constexpr (_Bytes < 16)
506 _mm_mask_storeu_epi32(__addr, 0xfu >> (4 - _Bytes / 4),
507 __intrin_bitcast<__m128i>(__v));
508 else if constexpr (_Bytes < 32)
509 _mm256_mask_storeu_epi32(__addr, 0xffu >> (8 - _Bytes / 4),
510 __intrin_bitcast<__m256i>(__v));
511 else
512 _mm512_mask_storeu_epi32(__addr, 0xffffull >> (16 - _Bytes / 4),
513 __intrin_bitcast<__m512i>(__v));
514 }
515 else
516 {
517 static_assert(
518 _Bytes > 16,
519 "_Bytes < 16 && (_Bytes & 7) == 0 && (_Bytes & (_Bytes "
520 "- 1)) != 0 is impossible");
521 if constexpr (_Bytes < 32)
522 _mm256_mask_storeu_epi64(__addr, 0xfu >> (4 - _Bytes / 8),
523 __intrin_bitcast<__m256i>(__v));
524 else
525 _mm512_mask_storeu_epi64(__addr, 0xffull >> (8 - _Bytes / 8),
526 __intrin_bitcast<__m512i>(__v));
527 }
528 }
529 else
530 _CommonImplBuiltin::_S_store(__x, __addr);
531 }
532
533 // }}}
534 // _S_store_bool_array(_BitMask) {{{
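  // Expands a bitmask into an array of bool, one byte per bit. Preferred
  // strategies in order: vpmovm2b (AVX512BW+VL), BMI2 pdep spreading the
  // bits into bytes (e.g. _pdep_u32(0b1011, 0x01010101U) == 0x01000101,
  // i.e. the bools true, true, false, true), an SSE2 broadcast + bit-test
  // sequence, and finally the generic fallback of the base class.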
535 template <size_t _Np, bool _Sanitized>
536 _GLIBCXX_SIMD_INTRINSIC static constexpr void
537 _S_store_bool_array(const _BitMask<_Np, _Sanitized> __x, bool* __mem)
538 {
539 if constexpr (__have_avx512bw_vl) // don't care for BW w/o VL
540 _S_store<_Np>(1 & __vector_bitcast<_UChar, _Np>([=]() constexpr {
541 if constexpr (_Np <= 16)
542 return _mm_movm_epi8(__x._M_to_bits());
543 else if constexpr (_Np <= 32)
544 return _mm256_movm_epi8(__x._M_to_bits());
545 else if constexpr (_Np <= 64)
546 return _mm512_movm_epi8(__x._M_to_bits());
547 else
548 __assert_unreachable<_SizeConstant<_Np>>();
549 }()),
550 __mem);
551 else if constexpr (__have_bmi2)
552 {
553 if constexpr (_Np <= 4)
554 _S_store<_Np>(_pdep_u32(__x._M_to_bits(), 0x01010101U), __mem);
555 else
556 __execute_n_times<__div_roundup(_Np, sizeof(size_t))>(
557 [&](auto __i) {
558 constexpr size_t __offset = __i * sizeof(size_t);
559 constexpr int __todo = std::min(sizeof(size_t), _Np - __offset);
560 if constexpr (__todo == 1)
561 __mem[__offset] = __x[__offset];
562 else
563 {
564 const auto __bools =
565#ifdef __x86_64__
566 _pdep_u64(__x.template _M_extract<__offset>().to_ullong(),
567 0x0101010101010101ULL);
568#else // __x86_64__
569 _pdep_u32(
570 __x.template _M_extract<__offset>()._M_to_bits(),
571 0x01010101U);
572#endif // __x86_64__
573 _S_store<__todo>(__bools, __mem + __offset);
574 }
575 });
576 }
577 else if constexpr (__have_sse2 && _Np > 7)
578 __execute_n_times<__div_roundup(_Np, 16)>([&](auto __i) {
579 constexpr int __offset = __i * 16;
580 constexpr int __todo = std::min(16, int(_Np) - __offset);
581 const int __bits = __x.template _M_extract<__offset>()._M_to_bits();
582 __vector_type16_t<_UChar> __bools;
583 if constexpr (__have_avx512f)
584 {
585 auto __as32bits
586 = _mm512_maskz_mov_epi32(__bits, __to_intrin(
587 __vector_broadcast<16>(1)));
588 auto __as16bits
589 = __xzyw(_mm256_packs_epi32(__lo256(__as32bits),
590 __todo > 8 ? __hi256(__as32bits)
591 : __m256i()));
592 __bools = __vector_bitcast<_UChar>(
593 _mm_packs_epi16(__lo128(__as16bits), __hi128(__as16bits)));
594 }
595 else
596 {
597 using _V = __vector_type_t<_UChar, 16>;
598 auto __tmp = _mm_cvtsi32_si128(__bits);
599 __tmp = _mm_unpacklo_epi8(__tmp, __tmp);
600 __tmp = _mm_unpacklo_epi16(__tmp, __tmp);
601 __tmp = _mm_unpacklo_epi32(__tmp, __tmp);
602 _V __tmp2 = reinterpret_cast<_V>(__tmp);
603 __tmp2 &= _V{1, 2, 4, 8, 16, 32, 64, 128,
604 1, 2, 4, 8, 16, 32, 64, 128}; // mask bit index
605 __bools = (__tmp2 == 0) + 1; // 0xff -> 0x00 | 0x00 -> 0x01
606 }
607 _S_store<__todo>(__bools, __mem + __offset);
608 });
609 else
610 _CommonImplBuiltin::_S_store_bool_array(__x, __mem);
611 }
612
613 // }}}
614 // _S_blend_avx512 {{{
615 // Returns: __k ? __b : __a
616 // TODO: reverse __a and __b to match COND_EXPR
617 // Requires: _TV to be a __vector_type_t matching valuetype for the bitmask
618 // __k
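  // E.g. with __k = 0b0110 and 4 x int vectors, elements 1 and 2 of the
  // result come from __b, elements 0 and 3 from __a.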
619 template <typename _Kp, typename _TV>
620 _GLIBCXX_SIMD_INTRINSIC static _TV
621 _S_blend_avx512(const _Kp __k, const _TV __a, const _TV __b) noexcept
622 {
623#ifdef __clang__
624 // FIXME: this does a boolean choice, not a blend
625 return __k ? __a : __b;
626#else
627 static_assert(__is_vector_type_v<_TV>);
628 using _Tp = typename _VectorTraits<_TV>::value_type;
629 static_assert(sizeof(_TV) >= 16);
630 static_assert(sizeof(_Tp) <= 8);
631 using _IntT
632 = conditional_t<(sizeof(_Tp) > 2),
633 conditional_t<sizeof(_Tp) == 4, int, long long>,
634 conditional_t<sizeof(_Tp) == 1, char, short>>;
635 [[maybe_unused]] const auto __aa = __vector_bitcast<_IntT>(__a);
636 [[maybe_unused]] const auto __bb = __vector_bitcast<_IntT>(__b);
637 if constexpr (sizeof(_TV) == 64)
638 {
639 if constexpr (sizeof(_Tp) == 1)
640 return reinterpret_cast<_TV>(
641 __builtin_ia32_blendmb_512_mask(__aa, __bb, __k));
642 else if constexpr (sizeof(_Tp) == 2)
643 return reinterpret_cast<_TV>(
644 __builtin_ia32_blendmw_512_mask(__aa, __bb, __k));
645 else if constexpr (sizeof(_Tp) == 4 && is_floating_point_v<_Tp>)
646 return __builtin_ia32_blendmps_512_mask(__a, __b, __k);
647 else if constexpr (sizeof(_Tp) == 4)
648 return reinterpret_cast<_TV>(
649 __builtin_ia32_blendmd_512_mask(__aa, __bb, __k));
650 else if constexpr (sizeof(_Tp) == 8 && is_floating_point_v<_Tp>)
651 return __builtin_ia32_blendmpd_512_mask(__a, __b, __k);
652 else if constexpr (sizeof(_Tp) == 8)
653 return reinterpret_cast<_TV>(
654 __builtin_ia32_blendmq_512_mask(__aa, __bb, __k));
655 }
656 else if constexpr (sizeof(_TV) == 32)
657 {
658 if constexpr (sizeof(_Tp) == 1)
659 return reinterpret_cast<_TV>(
660 __builtin_ia32_blendmb_256_mask(__aa, __bb, __k));
661 else if constexpr (sizeof(_Tp) == 2)
662 return reinterpret_cast<_TV>(
663 __builtin_ia32_blendmw_256_mask(__aa, __bb, __k));
664 else if constexpr (sizeof(_Tp) == 4 && is_floating_point_v<_Tp>)
665 return __builtin_ia32_blendmps_256_mask(__a, __b, __k);
666 else if constexpr (sizeof(_Tp) == 4)
667 return reinterpret_cast<_TV>(
668 __builtin_ia32_blendmd_256_mask(__aa, __bb, __k));
669 else if constexpr (sizeof(_Tp) == 8 && is_floating_point_v<_Tp>)
670 return __builtin_ia32_blendmpd_256_mask(__a, __b, __k);
671 else if constexpr (sizeof(_Tp) == 8)
672 return reinterpret_cast<_TV>(
673 __builtin_ia32_blendmq_256_mask(__aa, __bb, __k));
674 }
675 else if constexpr (sizeof(_TV) == 16)
676 {
677 if constexpr (sizeof(_Tp) == 1)
678 return reinterpret_cast<_TV>(
679 __builtin_ia32_blendmb_128_mask(__aa, __bb, __k));
680 else if constexpr (sizeof(_Tp) == 2)
681 return reinterpret_cast<_TV>(
682 __builtin_ia32_blendmw_128_mask(__aa, __bb, __k));
683 else if constexpr (sizeof(_Tp) == 4 && is_floating_point_v<_Tp>)
684 return __builtin_ia32_blendmps_128_mask(__a, __b, __k);
685 else if constexpr (sizeof(_Tp) == 4)
686 return reinterpret_cast<_TV>(
687 __builtin_ia32_blendmd_128_mask(__aa, __bb, __k));
688 else if constexpr (sizeof(_Tp) == 8 && is_floating_point_v<_Tp>)
689 return __builtin_ia32_blendmpd_128_mask(__a, __b, __k);
690 else if constexpr (sizeof(_Tp) == 8)
691 return reinterpret_cast<_TV>(
692 __builtin_ia32_blendmq_128_mask(__aa, __bb, __k));
693 }
694#endif
695 }
696
697 // }}}
698 // _S_blend_intrin {{{
699 // Returns: __k ? __b : __a
700 // TODO: reverse __a and __b to match COND_EXPR
701 // Requires: _Tp to be an intrinsic type (integers blend per byte) and 16/32
702 // Bytes wide
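  // The blendv instructions select __b wherever the sign bit (for pblendvb:
  // the high bit of each byte) of the corresponding element of __k is set,
  // so __k is expected to carry all-bits (or at least sign-bit) element
  // masks.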
703 template <typename _Tp>
704 _GLIBCXX_SIMD_INTRINSIC static _Tp _S_blend_intrin(_Tp __k, _Tp __a,
705 _Tp __b) noexcept
706 {
707 static_assert(is_same_v<decltype(__to_intrin(__a)), _Tp>);
708 constexpr struct
709 {
710 _GLIBCXX_SIMD_INTRINSIC __m128 operator()(__m128 __a, __m128 __b,
711 __m128 __k) const noexcept
712 {
713 return __builtin_ia32_blendvps(__a, __b, __k);
714 }
715 _GLIBCXX_SIMD_INTRINSIC __m128d operator()(__m128d __a, __m128d __b,
716 __m128d __k) const noexcept
717 {
718 return __builtin_ia32_blendvpd(__a, __b, __k);
719 }
720 _GLIBCXX_SIMD_INTRINSIC __m128i operator()(__m128i __a, __m128i __b,
721 __m128i __k) const noexcept
722 {
723 return reinterpret_cast<__m128i>(
724 __builtin_ia32_pblendvb128(reinterpret_cast<__v16qi>(__a),
725 reinterpret_cast<__v16qi>(__b),
726 reinterpret_cast<__v16qi>(__k)));
727 }
728 _GLIBCXX_SIMD_INTRINSIC __m256 operator()(__m256 __a, __m256 __b,
729 __m256 __k) const noexcept
730 {
731 return __builtin_ia32_blendvps256(__a, __b, __k);
732 }
733 _GLIBCXX_SIMD_INTRINSIC __m256d operator()(__m256d __a, __m256d __b,
734 __m256d __k) const noexcept
735 {
736 return __builtin_ia32_blendvpd256(__a, __b, __k);
737 }
738 _GLIBCXX_SIMD_INTRINSIC __m256i operator()(__m256i __a, __m256i __b,
739 __m256i __k) const noexcept
740 {
741 if constexpr (__have_avx2)
742 return reinterpret_cast<__m256i>(
743 __builtin_ia32_pblendvb256(reinterpret_cast<__v32qi>(__a),
744 reinterpret_cast<__v32qi>(__b),
745 reinterpret_cast<__v32qi>(__k)));
746 else
747 return reinterpret_cast<__m256i>(
748 __builtin_ia32_blendvps256(reinterpret_cast<__v8sf>(__a),
749 reinterpret_cast<__v8sf>(__b),
750 reinterpret_cast<__v8sf>(__k)));
751 }
752 } __eval;
753 return __eval(__a, __b, __k);
754 }
755
756 // }}}
757 // _S_blend {{{
758 // Returns: __k ? __at1 : __at0
759 // TODO: reverse __at0 and __at1 to match COND_EXPR
760 template <typename _Tp, size_t _Np>
761 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
762 _S_blend(_SimdWrapper<bool, _Np> __k, _SimdWrapper<_Tp, _Np> __at0,
763 _SimdWrapper<_Tp, _Np> __at1)
764 {
765 static_assert(is_same_v<_Tp, _Tp> && __have_avx512f);
766 if (__k._M_is_constprop() && __at0._M_is_constprop()
767 && __at1._M_is_constprop())
768 return __generate_from_n_evaluations<_Np,
769 __vector_type_t<_Tp, _Np>>([&](
770 auto __i) constexpr { return __k[__i] ? __at1[__i] : __at0[__i]; });
771 else if constexpr (sizeof(__at0) == 64
772 || (__have_avx512vl && sizeof(__at0) >= 16))
773 return _S_blend_avx512(__k._M_data, __at0._M_data, __at1._M_data);
774 else
775 {
776 static_assert((__have_avx512vl && sizeof(__at0) < 16)
777 || !__have_avx512vl);
778 constexpr size_t __size = (__have_avx512vl ? 16 : 64) / sizeof(_Tp);
779 return __vector_bitcast<_Tp, _Np>(
780 _S_blend_avx512(__k._M_data, __vector_bitcast<_Tp, __size>(__at0),
781 __vector_bitcast<_Tp, __size>(__at1)));
782 }
783 }
784
785 template <typename _Tp, size_t _Np>
786 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
787 _S_blend(_SimdWrapper<__int_for_sizeof_t<_Tp>, _Np> __k,
788 _SimdWrapper<_Tp, _Np> __at0, _SimdWrapper<_Tp, _Np> __at1)
789 {
790 const auto __kk = __wrapper_bitcast<_Tp>(__k);
791 if (__builtin_is_constant_evaluated()
792 || (__kk._M_is_constprop() && __at0._M_is_constprop()
793 && __at1._M_is_constprop()))
794 {
795 auto __r = __or(__andnot(__kk, __at0), __and(__kk, __at1));
796 if (__r._M_is_constprop())
797 return __r;
798 }
799 if constexpr (((__have_avx512f && sizeof(__at0) == 64) || __have_avx512vl)
800 && (sizeof(_Tp) >= 4 || __have_avx512bw))
801 // convert to bitmask and call overload above
802 return _S_blend(
803 _SimdWrapper<bool, _Np>(
804 __make_dependent_t<_Tp, _MaskImplX86Mixin>::_S_to_bits(__k)
805 ._M_to_bits()),
806 __at0, __at1);
807 else
808 {
809 // Since GCC does not assume __k to be a mask, using the builtin
810 // conditional operator introduces an extra compare against 0 before
811 // blending. So we rather call the intrinsic here.
812 if constexpr (__have_sse4_1)
813 return _S_blend_intrin(__to_intrin(__kk), __to_intrin(__at0),
814 __to_intrin(__at1));
815 else
816 return __or(__andnot(__kk, __at0), __and(__kk, __at1));
817 }
818 }
819
820 // }}}
821};
822
823// }}}
824// _SimdImplX86 {{{
825template <typename _Abi>
826 struct _SimdImplX86 : _SimdImplBuiltin<_Abi>
827 {
828 using _Base = _SimdImplBuiltin<_Abi>;
829
830 template <typename _Tp>
831 using _MaskMember = typename _Base::template _MaskMember<_Tp>;
832
833 template <typename _Tp>
834 static constexpr size_t _S_full_size = _Abi::template _S_full_size<_Tp>;
835
836 template <typename _Tp>
837 static constexpr size_t _S_size = _Abi::template _S_size<_Tp>;
838
839 template <typename _Tp>
840 static constexpr size_t _S_max_store_size
841 = (sizeof(_Tp) >= 4 && __have_avx512f) || __have_avx512bw ? 64
842 : (is_floating_point_v<_Tp>&& __have_avx) || __have_avx2 ? 32
843 : 16;
844 using _MaskImpl = typename _Abi::_MaskImpl;
845
846 // _S_masked_load {{{
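    // Masked load with merging: elements selected by __k are loaded from
    // __mem (converting _Up to _Tp where necessary), all other elements keep
    // their value from __merge. AVX512 and, for 4/8-byte elements, AVX/AVX2
    // provide native masked loads; remaining cases iterate over the set mask
    // bits or defer to the generic implementation.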
847 template <typename _Tp, size_t _Np, typename _Up>
848 static inline _SimdWrapper<_Tp, _Np>
849 _S_masked_load(_SimdWrapper<_Tp, _Np> __merge, _MaskMember<_Tp> __k,
850 const _Up* __mem) noexcept
851 {
852 static_assert(_Np == _S_size<_Tp>);
853 if constexpr (is_same_v<_Tp, _Up> || // no conversion
854 (sizeof(_Tp) == sizeof(_Up)
855 && is_integral_v<
856 _Tp> == is_integral_v<_Up>) // conversion via bit
857 // reinterpretation
858 )
859 {
860 [[maybe_unused]] const auto __intrin = __to_intrin(__merge);
861 if constexpr ((__is_avx512_abi<_Abi>() || __have_avx512bw_vl)
862 && sizeof(_Tp) == 1)
863 {
864 const auto __kk = _MaskImpl::_S_to_bits(__k)._M_to_bits();
865 if constexpr (sizeof(__intrin) == 16)
866 __merge = __vector_bitcast<_Tp, _Np>(
867 _mm_mask_loadu_epi8(__intrin, __kk, __mem));
868 else if constexpr (sizeof(__merge) == 32)
869 __merge = __vector_bitcast<_Tp, _Np>(
870 _mm256_mask_loadu_epi8(__intrin, __kk, __mem));
871 else if constexpr (sizeof(__merge) == 64)
872 __merge = __vector_bitcast<_Tp, _Np>(
873 _mm512_mask_loadu_epi8(__intrin, __kk, __mem));
874 else
875 __assert_unreachable<_Tp>();
876 }
877 else if constexpr ((__is_avx512_abi<_Abi>() || __have_avx512bw_vl)
878 && sizeof(_Tp) == 2)
879 {
880 const auto __kk = _MaskImpl::_S_to_bits(__k)._M_to_bits();
881 if constexpr (sizeof(__intrin) == 16)
882 __merge = __vector_bitcast<_Tp, _Np>(
883 _mm_mask_loadu_epi16(__intrin, __kk, __mem));
884 else if constexpr (sizeof(__intrin) == 32)
885 __merge = __vector_bitcast<_Tp, _Np>(
886 _mm256_mask_loadu_epi16(__intrin, __kk, __mem));
887 else if constexpr (sizeof(__intrin) == 64)
888 __merge = __vector_bitcast<_Tp, _Np>(
889 _mm512_mask_loadu_epi16(__intrin, __kk, __mem));
890 else
891 __assert_unreachable<_Tp>();
892 }
893 else if constexpr ((__is_avx512_abi<_Abi>() || __have_avx512vl)
894 && sizeof(_Tp) == 4 && is_integral_v<_Up>)
895 {
896 const auto __kk = _MaskImpl::_S_to_bits(__k)._M_to_bits();
897 if constexpr (sizeof(__intrin) == 16)
898 __merge = __vector_bitcast<_Tp, _Np>(
899 _mm_mask_loadu_epi32(__intrin, __kk, __mem));
900 else if constexpr (sizeof(__intrin) == 32)
901 __merge = __vector_bitcast<_Tp, _Np>(
902 _mm256_mask_loadu_epi32(__intrin, __kk, __mem));
903 else if constexpr (sizeof(__intrin) == 64)
904 __merge = __vector_bitcast<_Tp, _Np>(
905 _mm512_mask_loadu_epi32(__intrin, __kk, __mem));
906 else
907 __assert_unreachable<_Tp>();
908 }
909 else if constexpr ((__is_avx512_abi<_Abi>() || __have_avx512vl)
910 && sizeof(_Tp) == 4 && is_floating_point_v<_Up>)
911 {
912 const auto __kk = _MaskImpl::_S_to_bits(__k)._M_to_bits();
913 if constexpr (sizeof(__intrin) == 16)
914 __merge = __vector_bitcast<_Tp, _Np>(
915 _mm_mask_loadu_ps(__intrin, __kk, __mem));
916 else if constexpr (sizeof(__intrin) == 32)
917 __merge = __vector_bitcast<_Tp, _Np>(
918 _mm256_mask_loadu_ps(__intrin, __kk, __mem));
919 else if constexpr (sizeof(__intrin) == 64)
920 __merge = __vector_bitcast<_Tp, _Np>(
921 _mm512_mask_loadu_ps(__intrin, __kk, __mem));
922 else
923 __assert_unreachable<_Tp>();
924 }
925 else if constexpr (__have_avx2 && sizeof(_Tp) == 4
926 && is_integral_v<_Up>)
927 {
928 static_assert(sizeof(__intrin) == 16 || sizeof(__intrin) == 32);
929 __merge
930 = __or(__andnot(__vector_bitcast<_Tp>(__k), __merge._M_data),
931 __vector_bitcast<_Tp, _Np>(
932 __maskload_epi32(reinterpret_cast<const int*>(__mem),
933 __to_intrin(__k))));
934 }
935 else if constexpr (__have_avx && sizeof(_Tp) == 4)
936 {
937 static_assert(sizeof(__intrin) == 16 || sizeof(__intrin) == 32);
938 __merge
939 = __or(__andnot(__vector_bitcast<_Tp>(__k), __merge._M_data),
940 __vector_bitcast<_Tp, _Np>(
941 __maskload_ps(reinterpret_cast<const float*>(__mem),
942 __to_intrin(__k))));
943 }
944 else if constexpr ((__is_avx512_abi<_Abi>() || __have_avx512vl)
945 && sizeof(_Tp) == 8 && is_integral_v<_Up>)
946 {
947 const auto __kk = _MaskImpl::_S_to_bits(__k)._M_to_bits();
948 if constexpr (sizeof(__intrin) == 16)
949 __merge = __vector_bitcast<_Tp, _Np>(
950 _mm_mask_loadu_epi64(__intrin, __kk, __mem));
951 else if constexpr (sizeof(__intrin) == 32)
952 __merge = __vector_bitcast<_Tp, _Np>(
953 _mm256_mask_loadu_epi64(__intrin, __kk, __mem));
954 else if constexpr (sizeof(__intrin) == 64)
955 __merge = __vector_bitcast<_Tp, _Np>(
956 _mm512_mask_loadu_epi64(__intrin, __kk, __mem));
957 else
958 __assert_unreachable<_Tp>();
959 }
960 else if constexpr ((__is_avx512_abi<_Abi>() || __have_avx512vl)
961 && sizeof(_Tp) == 8 && is_floating_point_v<_Up>)
962 {
963 const auto __kk = _MaskImpl::_S_to_bits(__k)._M_to_bits();
964 if constexpr (sizeof(__intrin) == 16)
965 __merge = __vector_bitcast<_Tp, _Np>(
966 _mm_mask_loadu_pd(__intrin, __kk, __mem));
967 else if constexpr (sizeof(__intrin) == 32)
968 __merge = __vector_bitcast<_Tp, _Np>(
969 _mm256_mask_loadu_pd(__intrin, __kk, __mem));
970 else if constexpr (sizeof(__intrin) == 64)
971 __merge = __vector_bitcast<_Tp, _Np>(
972 _mm512_mask_loadu_pd(__intrin, __kk, __mem));
973 else
974 __assert_unreachable<_Tp>();
975 }
976 else if constexpr (__have_avx2 && sizeof(_Tp) == 8
977 && is_integral_v<_Up>)
978 {
979 static_assert(sizeof(__intrin) == 16 || sizeof(__intrin) == 32);
980 __merge
981 = __or(__andnot(__vector_bitcast<_Tp>(__k), __merge._M_data),
982 __vector_bitcast<_Tp, _Np>(__maskload_epi64(
983 reinterpret_cast<const _LLong*>(__mem),
984 __to_intrin(__k))));
985 }
986 else if constexpr (__have_avx && sizeof(_Tp) == 8)
987 {
988 static_assert(sizeof(__intrin) == 16 || sizeof(__intrin) == 32);
989 __merge
990 = __or(__andnot(__vector_bitcast<_Tp>(__k), __merge._M_data),
991 __vector_bitcast<_Tp, _Np>(
992 __maskload_pd(reinterpret_cast<const double*>(__mem),
993 __to_intrin(__k))));
994 }
995 else
996 _BitOps::_S_bit_iteration(_MaskImpl::_S_to_bits(__k),
997 [&](auto __i) {
998 __merge._M_set(__i, static_cast<_Tp>(
999 __mem[__i]));
1000 });
1001 }
 1002 /* It is very uncertain that the following improves anything. Needs
 1003 benchmarking before it's activated.
1005 else if constexpr (sizeof(_Up) <= 8 && // no long double
1006 !__converts_via_decomposition_v<
1007 _Up, _Tp,
1008 sizeof(__merge)> // conversion via decomposition
1009 // is better handled via the
1010 // bit_iteration fallback below
1011 )
1012 {
1013 // TODO: copy pattern from _S_masked_store, which doesn't resort to
1014 // fixed_size
1015 using _Ap = simd_abi::deduce_t<_Up, _Np>;
1016 using _ATraits = _SimdTraits<_Up, _Ap>;
1017 using _AImpl = typename _ATraits::_SimdImpl;
1018 typename _ATraits::_SimdMember __uncvted{};
1019 typename _ATraits::_MaskMember __kk = _Ap::_MaskImpl::template
1020 _S_convert<_Up>(__k);
1021 __uncvted = _AImpl::_S_masked_load(__uncvted, __kk, __mem);
1022 _SimdConverter<_Up, _Ap, _Tp, _Abi> __converter;
1023 _Base::_S_masked_assign(__k, __merge, __converter(__uncvted));
1024 }
1025 */
1026 else
1027 __merge = _Base::_S_masked_load(__merge, __k, __mem);
1028 return __merge;
1029 }
1030
1031 // }}}
1032 // _S_masked_store_nocvt {{{
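    // Masked store without value conversion. The first overload takes an
    // AVX512 bitmask, the second a vector mask; both select the widest
    // masked-store instruction available (AVX512 masked stores, AVX
    // maskstore, or SSE2 maskmovdqu) for the element size at hand.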
1033 template <typename _Tp, size_t _Np>
1034 _GLIBCXX_SIMD_INTRINSIC static void
1035 _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem,
1036 _SimdWrapper<bool, _Np> __k)
1037 {
1038 [[maybe_unused]] const auto __vi = __to_intrin(__v);
1039 if constexpr (sizeof(__vi) == 64)
1040 {
1041 static_assert(sizeof(__v) == 64 && __have_avx512f);
1042 if constexpr (__have_avx512bw && sizeof(_Tp) == 1)
1043 _mm512_mask_storeu_epi8(__mem, __k, __vi);
1044 else if constexpr (__have_avx512bw && sizeof(_Tp) == 2)
1045 _mm512_mask_storeu_epi16(__mem, __k, __vi);
1046 else if constexpr (__have_avx512f && sizeof(_Tp) == 4)
1047 {
1048 if constexpr (is_integral_v<_Tp>)
1049 _mm512_mask_storeu_epi32(__mem, __k, __vi);
1050 else
1051 _mm512_mask_storeu_ps(__mem, __k, __vi);
1052 }
1053 else if constexpr (__have_avx512f && sizeof(_Tp) == 8)
1054 {
1055 if constexpr (is_integral_v<_Tp>)
1056 _mm512_mask_storeu_epi64(__mem, __k, __vi);
1057 else
1058 _mm512_mask_storeu_pd(__mem, __k, __vi);
1059 }
1060#if 0 // with KNL either sizeof(_Tp) >= 4 or sizeof(_vi) <= 32
1061 // with Skylake-AVX512, __have_avx512bw is true
1062 else if constexpr (__have_sse2)
1063 {
1064 using _M = __vector_type_t<_Tp, _Np>;
1065 using _MVT = _VectorTraits<_M>;
1066 _mm_maskmoveu_si128(__auto_bitcast(__extract<0, 4>(__v._M_data)),
1067 __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(__k._M_data)),
1068 reinterpret_cast<char*>(__mem));
1069 _mm_maskmoveu_si128(__auto_bitcast(__extract<1, 4>(__v._M_data)),
1070 __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
1071 __k._M_data >> 1 * _MVT::_S_full_size)),
1072 reinterpret_cast<char*>(__mem) + 1 * 16);
1073 _mm_maskmoveu_si128(__auto_bitcast(__extract<2, 4>(__v._M_data)),
1074 __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
1075 __k._M_data >> 2 * _MVT::_S_full_size)),
1076 reinterpret_cast<char*>(__mem) + 2 * 16);
1077 if constexpr (_Np > 48 / sizeof(_Tp))
1078 _mm_maskmoveu_si128(
1079 __auto_bitcast(__extract<3, 4>(__v._M_data)),
1080 __auto_bitcast(_MaskImpl::template _S_convert<_Tp, _Np>(
1081 __k._M_data >> 3 * _MVT::_S_full_size)),
1082 reinterpret_cast<char*>(__mem) + 3 * 16);
1083 }
1084#endif
1085 else
1086 __assert_unreachable<_Tp>();
1087 }
1088 else if constexpr (sizeof(__vi) == 32)
1089 {
1090 if constexpr (__have_avx512bw_vl && sizeof(_Tp) == 1)
1091 _mm256_mask_storeu_epi8(__mem, __k, __vi);
1092 else if constexpr (__have_avx512bw_vl && sizeof(_Tp) == 2)
1093 _mm256_mask_storeu_epi16(__mem, __k, __vi);
1094 else if constexpr (__have_avx512vl && sizeof(_Tp) == 4)
1095 {
1096 if constexpr (is_integral_v<_Tp>)
1097 _mm256_mask_storeu_epi32(__mem, __k, __vi);
1098 else
1099 _mm256_mask_storeu_ps(__mem, __k, __vi);
1100 }
1101 else if constexpr (__have_avx512vl && sizeof(_Tp) == 8)
1102 {
1103 if constexpr (is_integral_v<_Tp>)
1104 _mm256_mask_storeu_epi64(__mem, __k, __vi);
1105 else
1106 _mm256_mask_storeu_pd(__mem, __k, __vi);
1107 }
1108 else if constexpr (__have_avx512f
1109 && (sizeof(_Tp) >= 4 || __have_avx512bw))
1110 {
1111 // use a 512-bit maskstore, using zero-extension of the bitmask
1112 _S_masked_store_nocvt(
1113 _SimdWrapper64<_Tp>(
1114 __intrin_bitcast<__vector_type64_t<_Tp>>(__v._M_data)),
1115 __mem, _SimdWrapper<bool, 64 / sizeof(_Tp)>(__k._M_data));
1116 }
1117 else
1118 _S_masked_store_nocvt(__v, __mem,
1119 _MaskImpl::template _S_to_maskvector<
1120 __int_for_sizeof_t<_Tp>, _Np>(__k));
1121 }
1122 else if constexpr (sizeof(__vi) == 16)
1123 {
1124 if constexpr (__have_avx512bw_vl && sizeof(_Tp) == 1)
1125 _mm_mask_storeu_epi8(__mem, __k, __vi);
1126 else if constexpr (__have_avx512bw_vl && sizeof(_Tp) == 2)
1127 _mm_mask_storeu_epi16(__mem, __k, __vi);
1128 else if constexpr (__have_avx512vl && sizeof(_Tp) == 4)
1129 {
1130 if constexpr (is_integral_v<_Tp>)
1131 _mm_mask_storeu_epi32(__mem, __k, __vi);
1132 else
1133 _mm_mask_storeu_ps(__mem, __k, __vi);
1134 }
1135 else if constexpr (__have_avx512vl && sizeof(_Tp) == 8)
1136 {
1137 if constexpr (is_integral_v<_Tp>)
1138 _mm_mask_storeu_epi64(__mem, __k, __vi);
1139 else
1140 _mm_mask_storeu_pd(__mem, __k, __vi);
1141 }
1142 else if constexpr (__have_avx512f
1143 && (sizeof(_Tp) >= 4 || __have_avx512bw))
1144 {
1145 // use a 512-bit maskstore, using zero-extension of the bitmask
1146 _S_masked_store_nocvt(
1147 _SimdWrapper64<_Tp>(
1148 __intrin_bitcast<__intrinsic_type64_t<_Tp>>(__v._M_data)),
1149 __mem, _SimdWrapper<bool, 64 / sizeof(_Tp)>(__k._M_data));
1150 }
1151 else
1152 _S_masked_store_nocvt(__v, __mem,
1153 _MaskImpl::template _S_to_maskvector<
1154 __int_for_sizeof_t<_Tp>, _Np>(__k));
1155 }
1156 else
1157 __assert_unreachable<_Tp>();
1158 }
1159
1160 template <typename _Tp, size_t _Np>
1161 _GLIBCXX_SIMD_INTRINSIC static void
1162 _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem,
1163 _SimdWrapper<__int_for_sizeof_t<_Tp>, _Np> __k)
1164 {
1165 if constexpr (sizeof(__v) <= 16)
1166 {
1167 [[maybe_unused]] const auto __vi
1168 = __intrin_bitcast<__m128i>(__as_vector(__v));
1169 [[maybe_unused]] const auto __ki
1170 = __intrin_bitcast<__m128i>(__as_vector(__k));
1171 if constexpr (__have_avx512bw_vl && sizeof(_Tp) == 1)
1172 _mm_mask_storeu_epi8(__mem, _mm_movepi8_mask(__ki), __vi);
1173 else if constexpr (__have_avx512bw_vl && sizeof(_Tp) == 2)
1174 _mm_mask_storeu_epi16(__mem, _mm_movepi16_mask(__ki), __vi);
1175 else if constexpr (__have_avx2 && sizeof(_Tp) == 4
1176 && is_integral_v<_Tp>)
1177 _mm_maskstore_epi32(reinterpret_cast<int*>(__mem), __ki, __vi);
1178 else if constexpr (__have_avx && sizeof(_Tp) == 4)
1179 _mm_maskstore_ps(reinterpret_cast<float*>(__mem), __ki,
1180 __vector_bitcast<float>(__vi));
1181 else if constexpr (__have_avx2 && sizeof(_Tp) == 8
1182 && is_integral_v<_Tp>)
1183 _mm_maskstore_epi64(reinterpret_cast<_LLong*>(__mem), __ki, __vi);
1184 else if constexpr (__have_avx && sizeof(_Tp) == 8)
1185 _mm_maskstore_pd(reinterpret_cast<double*>(__mem), __ki,
1186 __vector_bitcast<double>(__vi));
1187 else if constexpr (__have_sse2)
1188 _mm_maskmoveu_si128(__vi, __ki, reinterpret_cast<char*>(__mem));
1189 }
1190 else if constexpr (sizeof(__v) == 32)
1191 {
1192 [[maybe_unused]] const auto __vi
1193 = __intrin_bitcast<__m256i>(__as_vector(__v));
1194 [[maybe_unused]] const auto __ki
1195 = __intrin_bitcast<__m256i>(__as_vector(__k));
1196 if constexpr (__have_avx512bw_vl && sizeof(_Tp) == 1)
1197 _mm256_mask_storeu_epi8(__mem, _mm256_movepi8_mask(__ki), __vi);
1198 else if constexpr (__have_avx512bw_vl && sizeof(_Tp) == 2)
1199 _mm256_mask_storeu_epi16(__mem, _mm256_movepi16_mask(__ki), __vi);
1200 else if constexpr (__have_avx2 && sizeof(_Tp) == 4
1201 && is_integral_v<_Tp>)
1202 _mm256_maskstore_epi32(reinterpret_cast<int*>(__mem), __ki, __vi);
1203 else if constexpr (sizeof(_Tp) == 4)
1204 _mm256_maskstore_ps(reinterpret_cast<float*>(__mem), __ki,
1205 __vector_bitcast<float>(__v));
1206 else if constexpr (__have_avx2 && sizeof(_Tp) == 8
1207 && is_integral_v<_Tp>)
1208 _mm256_maskstore_epi64(reinterpret_cast<_LLong*>(__mem), __ki,
1209 __vi);
1210 else if constexpr (__have_avx && sizeof(_Tp) == 8)
1211 _mm256_maskstore_pd(reinterpret_cast<double*>(__mem), __ki,
1212 __vector_bitcast<double>(__v));
1213 else if constexpr (__have_sse2)
1214 {
1215 _mm_maskmoveu_si128(__lo128(__vi), __lo128(__ki),
1216 reinterpret_cast<char*>(__mem));
1217 _mm_maskmoveu_si128(__hi128(__vi), __hi128(__ki),
1218 reinterpret_cast<char*>(__mem) + 16);
1219 }
1220 }
1221 else
1222 __assert_unreachable<_Tp>();
1223 }
1224
1225 // }}}
1226 // _S_masked_store {{{
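    // Masked store with conversion. Narrowing integer stores (e.g. 8-byte to
    // 4-byte elements) can use the truncating AVX512 vpmov*-store
    // instructions directly; all other conversions go through the generic
    // base class implementation.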
1227 template <typename _Tp, size_t _Np, typename _Up>
1228 _GLIBCXX_SIMD_INTRINSIC static void
1229 _S_masked_store(const _SimdWrapper<_Tp, _Np> __v, _Up* __mem,
1230 const _MaskMember<_Tp> __k) noexcept
1231 {
1232 if constexpr (is_integral_v<
1233 _Tp> && is_integral_v<_Up> && sizeof(_Tp) > sizeof(_Up)
1234 && __have_avx512f && (sizeof(_Tp) >= 4 || __have_avx512bw)
1235 && (sizeof(__v) == 64 || __have_avx512vl))
1236 { // truncating store
1237 const auto __vi = __to_intrin(__v);
1238 const auto __kk = _MaskImpl::_S_to_bits(__k)._M_to_bits();
1239 if constexpr (sizeof(_Tp) == 8 && sizeof(_Up) == 4
1240 && sizeof(__vi) == 64)
1241 _mm512_mask_cvtepi64_storeu_epi32(__mem, __kk, __vi);
1242 else if constexpr (sizeof(_Tp) == 8 && sizeof(_Up) == 4
1243 && sizeof(__vi) == 32)
1244 _mm256_mask_cvtepi64_storeu_epi32(__mem, __kk, __vi);
1245 else if constexpr (sizeof(_Tp) == 8 && sizeof(_Up) == 4
1246 && sizeof(__vi) == 16)
1247 _mm_mask_cvtepi64_storeu_epi32(__mem, __kk, __vi);
1248 else if constexpr (sizeof(_Tp) == 8 && sizeof(_Up) == 2
1249 && sizeof(__vi) == 64)
1250 _mm512_mask_cvtepi64_storeu_epi16(__mem, __kk, __vi);
1251 else if constexpr (sizeof(_Tp) == 8 && sizeof(_Up) == 2
1252 && sizeof(__vi) == 32)
1253 _mm256_mask_cvtepi64_storeu_epi16(__mem, __kk, __vi);
1254 else if constexpr (sizeof(_Tp) == 8 && sizeof(_Up) == 2
1255 && sizeof(__vi) == 16)
1256 _mm_mask_cvtepi64_storeu_epi16(__mem, __kk, __vi);
1257 else if constexpr (sizeof(_Tp) == 8 && sizeof(_Up) == 1
1258 && sizeof(__vi) == 64)
1259 _mm512_mask_cvtepi64_storeu_epi8(__mem, __kk, __vi);
1260 else if constexpr (sizeof(_Tp) == 8 && sizeof(_Up) == 1
1261 && sizeof(__vi) == 32)
1262 _mm256_mask_cvtepi64_storeu_epi8(__mem, __kk, __vi);
1263 else if constexpr (sizeof(_Tp) == 8 && sizeof(_Up) == 1
1264 && sizeof(__vi) == 16)
1265 _mm_mask_cvtepi64_storeu_epi8(__mem, __kk, __vi);
1266 else if constexpr (sizeof(_Tp) == 4 && sizeof(_Up) == 2
1267 && sizeof(__vi) == 64)
1268 _mm512_mask_cvtepi32_storeu_epi16(__mem, __kk, __vi);
1269 else if constexpr (sizeof(_Tp) == 4 && sizeof(_Up) == 2
1270 && sizeof(__vi) == 32)
1271 _mm256_mask_cvtepi32_storeu_epi16(__mem, __kk, __vi);
1272 else if constexpr (sizeof(_Tp) == 4 && sizeof(_Up) == 2
1273 && sizeof(__vi) == 16)
1274 _mm_mask_cvtepi32_storeu_epi16(__mem, __kk, __vi);
1275 else if constexpr (sizeof(_Tp) == 4 && sizeof(_Up) == 1
1276 && sizeof(__vi) == 64)
1277 _mm512_mask_cvtepi32_storeu_epi8(__mem, __kk, __vi);
1278 else if constexpr (sizeof(_Tp) == 4 && sizeof(_Up) == 1
1279 && sizeof(__vi) == 32)
1280 _mm256_mask_cvtepi32_storeu_epi8(__mem, __kk, __vi);
1281 else if constexpr (sizeof(_Tp) == 4 && sizeof(_Up) == 1
1282 && sizeof(__vi) == 16)
1283 _mm_mask_cvtepi32_storeu_epi8(__mem, __kk, __vi);
1284 else if constexpr (sizeof(_Tp) == 2 && sizeof(_Up) == 1
1285 && sizeof(__vi) == 64)
1286 _mm512_mask_cvtepi16_storeu_epi8(__mem, __kk, __vi);
1287 else if constexpr (sizeof(_Tp) == 2 && sizeof(_Up) == 1
1288 && sizeof(__vi) == 32)
1289 _mm256_mask_cvtepi16_storeu_epi8(__mem, __kk, __vi);
1290 else if constexpr (sizeof(_Tp) == 2 && sizeof(_Up) == 1
1291 && sizeof(__vi) == 16)
1292 _mm_mask_cvtepi16_storeu_epi8(__mem, __kk, __vi);
1293 else
1294 __assert_unreachable<_Tp>();
1295 }
1296 else
1297 _Base::_S_masked_store(__v, __mem, __k);
1298 }
1299
1300 // }}}
1301 // _S_multiplies {{{
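    // Element-wise multiplication. x86 has no byte multiply, so 1-byte
    // elements are computed via 16-bit multiplies: even-indexed bytes from a
    // plain 16-bit product, odd-indexed bytes from multiplying the high
    // bytes with the low bytes masked off, and the two results blended on
    // the 0xff00 half of every 16-bit lane. Larger elements use the generic
    // implementation.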
1302 template <typename _V, typename _VVT = _VectorTraits<_V>>
1303 _GLIBCXX_SIMD_INTRINSIC static constexpr _V _S_multiplies(_V __x, _V __y)
1304 {
1305 using _Tp = typename _VVT::value_type;
1306 if (__builtin_is_constant_evaluated() || __x._M_is_constprop()
1307 || __y._M_is_constprop())
1308 return __as_vector(__x) * __as_vector(__y);
1309 else if constexpr (sizeof(_Tp) == 1)
1310 {
1311 if constexpr (sizeof(_V) == 2)
1312 {
1313 const auto __xs = reinterpret_cast<short>(__x._M_data);
1314 const auto __ys = reinterpret_cast<short>(__y._M_data);
1315 return reinterpret_cast<__vector_type_t<_Tp, 2>>(short(
1316 ((__xs * __ys) & 0xff) | ((__xs >> 8) * (__ys & 0xff00))));
1317 }
1318 else if constexpr (sizeof(_V) == 4 && _VVT::_S_partial_width == 3)
1319 {
1320 const auto __xi = reinterpret_cast<int>(__x._M_data);
1321 const auto __yi = reinterpret_cast<int>(__y._M_data);
1322 return reinterpret_cast<__vector_type_t<_Tp, 3>>(
1323 ((__xi * __yi) & 0xff)
1324 | (((__xi >> 8) * (__yi & 0xff00)) & 0xff00)
1325 | ((__xi >> 16) * (__yi & 0xff0000)));
1326 }
1327 else if constexpr (sizeof(_V) == 4)
1328 {
1329 const auto __xi = reinterpret_cast<int>(__x._M_data);
1330 const auto __yi = reinterpret_cast<int>(__y._M_data);
1331 return reinterpret_cast<__vector_type_t<_Tp, 4>>(
1332 ((__xi * __yi) & 0xff)
1333 | (((__xi >> 8) * (__yi & 0xff00)) & 0xff00)
1334 | (((__xi >> 16) * (__yi & 0xff0000)) & 0xff0000)
1335 | ((__xi >> 24) * (__yi & 0xff000000u)));
1336 }
1337 else if constexpr (sizeof(_V) == 8 && __have_avx2
1338 && is_signed_v<_Tp>)
1339 return __convert<typename _VVT::type>(
1340 __vector_bitcast<short>(_mm_cvtepi8_epi16(__to_intrin(__x)))
1341 * __vector_bitcast<short>(_mm_cvtepi8_epi16(__to_intrin(__y))));
1342 else if constexpr (sizeof(_V) == 8 && __have_avx2
1343 && is_unsigned_v<_Tp>)
1344 return __convert<typename _VVT::type>(
1345 __vector_bitcast<short>(_mm_cvtepu8_epi16(__to_intrin(__x)))
1346 * __vector_bitcast<short>(_mm_cvtepu8_epi16(__to_intrin(__y))));
1347 else
1348 {
1349 // codegen of `x*y` is suboptimal (as of GCC 9.0.1)
1350 constexpr size_t __full_size = _VVT::_S_full_size;
1351 constexpr int _Np = sizeof(_V) >= 16 ? __full_size / 2 : 8;
1352 using _ShortW = _SimdWrapper<short, _Np>;
1353 const _ShortW __even = __vector_bitcast<short, _Np>(__x)
1354 * __vector_bitcast<short, _Np>(__y);
1355 _ShortW __high_byte = _ShortW()._M_data - 256;
1356 //[&]() { asm("" : "+x"(__high_byte._M_data)); }();
1357 const _ShortW __odd
1358 = (__vector_bitcast<short, _Np>(__x) >> 8)
1359 * (__vector_bitcast<short, _Np>(__y) & __high_byte._M_data);
1360 if constexpr (__have_avx512bw && sizeof(_V) > 2)
1361 return _CommonImplX86::_S_blend_avx512(
1362 0xaaaa'aaaa'aaaa'aaaaLL, __vector_bitcast<_Tp>(__even),
1363 __vector_bitcast<_Tp>(__odd));
1364 else if constexpr (__have_sse4_1 && sizeof(_V) > 2)
1365 return _CommonImplX86::_S_blend_intrin(__to_intrin(
1366 __high_byte),
1367 __to_intrin(__even),
1368 __to_intrin(__odd));
1369 else
1370 return __to_intrin(
1371 __or(__andnot(__high_byte, __even), __odd));
1372 }
1373 }
1374 else
1375 return _Base::_S_multiplies(__x, __y);
1376 }
1377
1378 // }}}
1379 // _S_divides {{{
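    // Integer division for element sizes of at most 4 bytes is carried out
    // in floating point: 1- and 2-byte elements are converted to float,
    // 4-byte elements to double (both represent the inputs exactly),
    // divided, and the quotients converted back to the integer element type.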
1380#ifdef _GLIBCXX_SIMD_WORKAROUND_PR90993
1381 template <typename _Tp, size_t _Np>
1382 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
1383 _S_divides(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
1384 {
1385 if (!__builtin_is_constant_evaluated()
1386 && !__builtin_constant_p(__y._M_data))
1387 if constexpr (is_integral_v<_Tp> && sizeof(_Tp) <= 4)
1388 { // use divps - codegen of `x/y` is suboptimal (as of GCC 9.0.1)
1389 // Note that using floating-point division is likely to raise the
1390 // *Inexact* exception flag and thus appears like an invalid
1391 // "as-if" transformation. However, C++ doesn't specify how the
1392 // fpenv can be observed and points to C. C says that function
1393 // calls are assumed to potentially raise fp exceptions, unless
1394 // documented otherwise. Consequently, operator/, which is a
1395 // function call, may raise fp exceptions.
1396 /*const struct _CsrGuard
1397 {
1398 const unsigned _M_data = _mm_getcsr();
1399 _CsrGuard()
1400 {
1401 _mm_setcsr(0x9f80); // turn off FP exceptions and
1402 flush-to-zero
1403 }
1404 ~_CsrGuard() { _mm_setcsr(_M_data); }
1405 } __csr;*/
1406 using _Float = conditional_t<sizeof(_Tp) == 4, double, float>;
1407 constexpr size_t __n_intermediate
1408 = std::min(_Np, (__have_avx512f ? 64
1409 : __have_avx ? 32
1410 : 16)
1411 / sizeof(_Float));
1412 using _FloatV = __vector_type_t<_Float, __n_intermediate>;
1413 constexpr size_t __n_floatv
1414 = __div_roundup(_Np, __n_intermediate);
1415 using _R = __vector_type_t<_Tp, _Np>;
1416 const auto __xf = __convert_all<_FloatV, __n_floatv>(__x);
1417 const auto __yf = __convert_all<_FloatV, __n_floatv>(
1418 _Abi::__make_padding_nonzero(__as_vector(__y)));
1419 return __call_with_n_evaluations<__n_floatv>(
1420 [](auto... __quotients) {
1421 return __vector_convert<_R>(__quotients...);
1422 },
1423 [&__xf,
1424 &__yf](auto __i) -> _SimdWrapper<_Float, __n_intermediate> {
1425#if __GCC_IEC_559 == 0
1426 // If -freciprocal-math is active, using the `/` operator is
1427 // incorrect because it may be translated to an imprecise
1428 // multiplication with reciprocal. We need to use inline
1429 // assembly to force a real division.
1430 _FloatV __r;
1431 if constexpr (__have_avx) // -mno-sse2avx is irrelevant
1432 // because once -mavx is given, GCC
1433 // emits VEX encoded vdivp[sd]
1434 {
1435 if constexpr (sizeof(_Tp) == 4)
1436 asm("vdivpd\t{%2, %1, %0|%0, %1, %2}"
1437 : "=x"(__r)
1438 : "x"(__xf[__i]), "x"(__yf[__i]));
1439 else
1440 asm("vdivps\t{%2, %1, %0|%0, %1, %2}"
1441 : "=x"(__r)
1442 : "x"(__xf[__i]), "x"(__yf[__i]));
1443 }
1444 else
1445 {
1446 __r = __xf[__i];
1447 if constexpr (sizeof(_Tp) == 4)
1448 asm("divpd\t{%1, %0|%0, %1}"
1449 : "=x"(__r)
1450 : "x"(__yf[__i]));
1451 else
1452 asm("divps\t{%1, %0|%0, %1}"
1453 : "=x"(__r)
1454 : "x"(__yf[__i]));
1455 }
1456 return __r;
1457#else
1458 return __xf[__i] / __yf[__i];
1459#endif
1460 });
1461 }
1462 /* 64-bit int division is potentially optimizable via double division if
1463 * the value in __x is small enough and the conversion between
1464 * int<->double is efficient enough:
1465 else if constexpr (is_integral_v<_Tp> && is_unsigned_v<_Tp> &&
1466 sizeof(_Tp) == 8)
1467 {
1468 if constexpr (__have_sse4_1 && sizeof(__x) == 16)
1469 {
1470 if (_mm_test_all_zeros(__x, __m128i{0xffe0'0000'0000'0000ull,
1471 0xffe0'0000'0000'0000ull}))
1472 {
1473 __x._M_data | 0x __vector_convert<__m128d>(__x._M_data)
1474 }
1475 }
1476 }
1477 */
1478 return _Base::_S_divides(__x, __y);
1479 }
1480#endif // _GLIBCXX_SIMD_WORKAROUND_PR90993
1481
1482 // }}}
1483 // _S_modulus {{{
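    // Reuses the optimized _S_divides above: for the element widths it
    // covers, x % y is computed as x - y * (x / y).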
1484 template <typename _Tp, size_t _Np>
1485 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
1486 _S_modulus(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
1487 {
1488 if (__builtin_is_constant_evaluated()
1489 || __builtin_constant_p(__y._M_data) || sizeof(_Tp) >= 8)
1490 return _Base::_S_modulus(__x, __y);
1491 else
1492 return _Base::_S_minus(__x, _S_multiplies(__y, _S_divides(__x, __y)));
1493 }
1494
1495 // }}}
1496 // _S_bit_shift_left {{{
1497 // Notes on UB. C++2a [expr.shift] says:
1498 // -1- [...] The operands shall be of integral or unscoped enumeration type
1499 // and integral promotions are performed. The type of the result is that
1500 // of the promoted left operand. The behavior is undefined if the right
1501 // operand is negative, or greater than or equal to the width of the
1502 // promoted left operand.
1503 // -2- The value of E1 << E2 is the unique value congruent to E1×2^E2 modulo
1504 // 2^N, where N is the width of the type of the result.
1505 //
1506 // C++17 [expr.shift] says:
1507 // -2- The value of E1 << E2 is E1 left-shifted E2 bit positions; vacated
1508 // bits are zero-filled. If E1 has an unsigned type, the value of the
1509 // result is E1 × 2^E2 , reduced modulo one more than the maximum value
1510 // representable in the result type. Otherwise, if E1 has a signed type
1511 // and non-negative value, and E1 × 2^E2 is representable in the
1512 // corresponding unsigned type of the result type, then that value,
1513 // converted to the result type, is the resulting value; otherwise, the
1514 // behavior is undefined.
1515 //
1516 // Consequences:
1517 // With C++2a signed and unsigned types have the same UB
1518 // characteristics:
1519 // - left shift is not UB for 0 <= RHS < max(32, #bits(T))
1520 //
1521 // With C++17 there's little room for optimizations because the standard
1522 // requires all shifts to happen on promoted integrals (i.e. int). Thus,
1523 // short and char shifts must assume shifts affect bits of neighboring
1524 // values.
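    // E.g. simd<unsigned char> << 1 must behave as if each element were
    // promoted to int, shifted, and truncated back to one byte; the code
    // below therefore shifts wider lanes and masks off the bits that crossed
    // a byte boundary.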
1525 #ifndef _GLIBCXX_SIMD_NO_SHIFT_OPT
1526 template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
1527 constexpr inline _GLIBCXX_CONST static typename _TVT::type
1528 _S_bit_shift_left(_Tp __xx, int __y)
1529 {
1530 using _V = typename _TVT::type;
1531 using _Up = typename _TVT::value_type;
1532 _V __x = __xx;
1533 [[maybe_unused]] const auto __ix = __to_intrin(__x);
1534 if (__builtin_is_constant_evaluated())
1535 return __x << __y;
1536#if __cplusplus > 201703
1537 // after C++17, signed shifts have no UB, and behave just like unsigned
1538 // shifts
1539 else if constexpr (sizeof(_Up) == 1 && is_signed_v<_Up>)
1540 return __vector_bitcast<_Up>(
1541 _S_bit_shift_left(__vector_bitcast<make_unsigned_t<_Up>>(__x),
1542 __y));
1543#endif
1544 else if constexpr (sizeof(_Up) == 1)
1545 {
1546 // (cf. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83894)
1547 if (__builtin_constant_p(__y))
1548 {
1549 if (__y == 0)
1550 return __x;
1551 else if (__y == 1)
1552 return __x + __x;
1553 else if (__y == 2)
1554 {
1555 __x = __x + __x;
1556 return __x + __x;
1557 }
1558 else if (__y > 2 && __y < 8)
1559 {
1560 if constexpr (sizeof(__x) > sizeof(unsigned))
1561 {
1562 const _UChar __mask = 0xff << __y; // precomputed vector
1563 return __vector_bitcast<_Up>(
1564 __vector_bitcast<_UChar>(
1565 __vector_bitcast<unsigned>(__x) << __y)
1566 & __mask);
1567 }
1568 else
1569 {
1570 const unsigned __mask
1571 = (0xff & (0xff << __y)) * 0x01010101u;
1572 return reinterpret_cast<_V>(
1573 static_cast<__int_for_sizeof_t<_V>>(
1574 unsigned(
1575 reinterpret_cast<__int_for_sizeof_t<_V>>(__x)
1576 << __y)
1577 & __mask));
1578 }
1579 }
1580 else if (__y >= 8 && __y < 32)
1581 return _V();
1582 else
1583 __builtin_unreachable();
1584 }
1585 // general strategy in the following: use an sllv instead of sll
1586 // instruction, because it's 2 to 4 times faster:
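	  // i.e. widen the bytes to 16-bit lanes, shift with the
	  // variable-count vpsllvw, and narrow back down with the truncating
	  // vpmovwb.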
1587 else if constexpr (__have_avx512bw_vl && sizeof(__x) == 16)
1588 return __vector_bitcast<_Up>(_mm256_cvtepi16_epi8(
1589 _mm256_sllv_epi16(_mm256_cvtepi8_epi16(__ix),
1590 _mm256_set1_epi16(__y))));
1591 else if constexpr (__have_avx512bw && sizeof(__x) == 32)
1592 return __vector_bitcast<_Up>(_mm512_cvtepi16_epi8(
1593 _mm512_sllv_epi16(_mm512_cvtepi8_epi16(__ix),
1594 _mm512_set1_epi16(__y))));
1595 else if constexpr (__have_avx512bw && sizeof(__x) == 64)
1596 {
1597 const auto __shift = _mm512_set1_epi16(__y);
1598 return __vector_bitcast<_Up>(
1599 __concat(_mm512_cvtepi16_epi8(_mm512_sllv_epi16(
1600 _mm512_cvtepi8_epi16(__lo256(__ix)), __shift)),
1601 _mm512_cvtepi16_epi8(_mm512_sllv_epi16(
1602 _mm512_cvtepi8_epi16(__hi256(__ix)), __shift))));
1603 }
1604 else if constexpr (__have_avx2 && sizeof(__x) == 32)
1605 {
1606#if 1
1607 const auto __shift = _mm_cvtsi32_si128(__y);
1608 auto __k
1609 = _mm256_sll_epi16(_mm256_slli_epi16(~__m256i(), 8), __shift);
1610 __k |= _mm256_srli_epi16(__k, 8);
1611 return __vector_bitcast<_Up>(_mm256_sll_epi32(__ix, __shift)
1612 & __k);
1613#else
1614 const _Up __k = 0xff << __y;
1615 return __vector_bitcast<_Up>(__vector_bitcast<int>(__x) << __y)
1616 & __k;
1617#endif
1618 }
1619 else
1620 {
1621 const auto __shift = _mm_cvtsi32_si128(__y);
1622 auto __k
1623 = _mm_sll_epi16(_mm_slli_epi16(~__m128i(), 8), __shift);
1624 __k |= _mm_srli_epi16(__k, 8);
1625 return __intrin_bitcast<_V>(_mm_sll_epi16(__ix, __shift) & __k);
1626 }
1627 }
1628 return __x << __y;
1629 }
1630
1631 template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
1632 constexpr inline _GLIBCXX_CONST static typename _TVT::type
1633 _S_bit_shift_left(_Tp __xx, typename _TVT::type __y)
1634 {
1635 using _V = typename _TVT::type;
1636 using _Up = typename _TVT::value_type;
1637 _V __x = __xx;
1638 [[maybe_unused]] const auto __ix = __to_intrin(__x);
1639 [[maybe_unused]] const auto __iy = __to_intrin(__y);
1640 if (__builtin_is_constant_evaluated())
1641 return __x << __y;
1642#if __cplusplus > 201703
1643 // after C++17, signed shifts have no UB, and behave just like unsigned
1644 // shifts
1645 else if constexpr (is_signed_v<_Up>)
1646 return __vector_bitcast<_Up>(
1647 _S_bit_shift_left(__vector_bitcast<make_unsigned_t<_Up>>(__x),
1648 __vector_bitcast<make_unsigned_t<_Up>>(__y)));
1649#endif
1650 else if constexpr (sizeof(_Up) == 1)
1651 {
1652 if constexpr (sizeof __ix == 64 && __have_avx512bw)
1653 return __vector_bitcast<_Up>(__concat(
1654 _mm512_cvtepi16_epi8(
1655 _mm512_sllv_epi16(_mm512_cvtepu8_epi16(__lo256(__ix)),
1656 _mm512_cvtepu8_epi16(__lo256(__iy)))),
1657 _mm512_cvtepi16_epi8(
1658 _mm512_sllv_epi16(_mm512_cvtepu8_epi16(__hi256(__ix)),
1659 _mm512_cvtepu8_epi16(__hi256(__iy))))));
1660 else if constexpr (sizeof __ix == 32 && __have_avx512bw)
1661 return __vector_bitcast<_Up>(_mm512_cvtepi16_epi8(
1662 _mm512_sllv_epi16(_mm512_cvtepu8_epi16(__ix),
1663 _mm512_cvtepu8_epi16(__iy))));
1664 else if constexpr (sizeof __x <= 8 && __have_avx512bw_vl)
1665 return __intrin_bitcast<_V>(
1666 _mm_cvtepi16_epi8(_mm_sllv_epi16(_mm_cvtepu8_epi16(__ix),
1667 _mm_cvtepu8_epi16(__iy))));
1668 else if constexpr (sizeof __ix == 16 && __have_avx512bw_vl)
1669 return __intrin_bitcast<_V>(_mm256_cvtepi16_epi8(
1670 _mm256_sllv_epi16(_mm256_cvtepu8_epi16(__ix),
1671 _mm256_cvtepu8_epi16(__iy))));
1672 else if constexpr (sizeof __ix == 16 && __have_avx512bw)
1673 return __intrin_bitcast<_V>(
1674 __lo128(_mm512_cvtepi16_epi8(_mm512_sllv_epi16(
1675 _mm512_cvtepu8_epi16(_mm256_castsi128_si256(__ix)),
1676 _mm512_cvtepu8_epi16(_mm256_castsi128_si256(__iy))))));
1677 else if constexpr (__have_sse4_1 && sizeof(__x) == 16)
1678 {
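		// Per-element byte shift via pblendvb: __y << 5 moves bit 2
		// of each count into the byte's sign bit, selecting the <<4
		// result; doubling __mask then exposes bit 1 (<<2) and bit 0
		// (<<1) in turn, and counts above 7 zero the result at the
		// end.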
1679 auto __mask
1680 = __vector_bitcast<_Up>(__vector_bitcast<short>(__y) << 5);
1681 auto __x4
1682 = __vector_bitcast<_Up>(__vector_bitcast<short>(__x) << 4);
1683 __x4 &= char(0xf0);
1684 __x = reinterpret_cast<_V>(_CommonImplX86::_S_blend_intrin(
1685 __to_intrin(__mask), __to_intrin(__x), __to_intrin(__x4)));
1686 __mask += __mask;
1687 auto __x2
1688 = __vector_bitcast<_Up>(__vector_bitcast<short>(__x) << 2);
1689 __x2 &= char(0xfc);
1690 __x = reinterpret_cast<_V>(_CommonImplX86::_S_blend_intrin(
1691 __to_intrin(__mask), __to_intrin(__x), __to_intrin(__x2)));
1692 __mask += __mask;
1693 auto __x1 = __x + __x;
1694 __x = reinterpret_cast<_V>(_CommonImplX86::_S_blend_intrin(
1695 __to_intrin(__mask), __to_intrin(__x), __to_intrin(__x1)));
1696 return __x
1697 & ((__y & char(0xf8)) == 0); // y > 7 nulls the result
1698 }
1699 else if constexpr (sizeof(__x) == 16)
1700 {
1701 auto __mask
1702 = __vector_bitcast<_UChar>(__vector_bitcast<short>(__y) << 5);
1703 auto __x4
1704 = __vector_bitcast<_Up>(__vector_bitcast<short>(__x) << 4);
1705 __x4 &= char(0xf0);
1706 __x = __vector_bitcast<_SChar>(__mask) < 0 ? __x4 : __x;
1707 __mask += __mask;
1708 auto __x2
1709 = __vector_bitcast<_Up>(__vector_bitcast<short>(__x) << 2);
1710 __x2 &= char(0xfc);
1711 __x = __vector_bitcast<_SChar>(__mask) < 0 ? __x2 : __x;
1712 __mask += __mask;
1713 auto __x1 = __x + __x;
1714 __x = __vector_bitcast<_SChar>(__mask) < 0 ? __x1 : __x;
1715 return __x
1716 & ((__y & char(0xf8)) == 0); // y > 7 nulls the result
1717 }
1718 else
1719 return __x << __y;
1720 }
1721 else if constexpr (sizeof(_Up) == 2)
1722 {
1723 if constexpr (sizeof __ix == 64 && __have_avx512bw)
1724 return __vector_bitcast<_Up>(_mm512_sllv_epi16(__ix, __iy));
1725 else if constexpr (sizeof __ix == 32 && __have_avx512bw_vl)
1726 return __vector_bitcast<_Up>(_mm256_sllv_epi16(__ix, __iy));
1727 else if constexpr (sizeof __ix == 32 && __have_avx512bw)
1728 return __vector_bitcast<_Up>(
1729 __lo256(_mm512_sllv_epi16(_mm512_castsi256_si512(__ix),
1730 _mm512_castsi256_si512(__iy))));
1731 else if constexpr (sizeof __ix == 32 && __have_avx2)
1732 {
1733 const auto __ux = __vector_bitcast<unsigned>(__x);
1734 const auto __uy = __vector_bitcast<unsigned>(__y);
1735 return __vector_bitcast<_Up>(_mm256_blend_epi16(
1736 __auto_bitcast(__ux << (__uy & 0x0000ffffu)),
1737 __auto_bitcast((__ux & 0xffff0000u) << (__uy >> 16)), 0xaa));
1738 }
1739 else if constexpr (sizeof __ix == 16 && __have_avx512bw_vl)
1740 return __intrin_bitcast<_V>(_mm_sllv_epi16(__ix, __iy));
1741 else if constexpr (sizeof __ix == 16 && __have_avx512bw)
1742 return __intrin_bitcast<_V>(
1743 __lo128(_mm512_sllv_epi16(_mm512_castsi128_si512(__ix),
1744 _mm512_castsi128_si512(__iy))));
1745 else if constexpr (sizeof __ix == 16 && __have_avx2)
1746 {
1747 const auto __ux = __vector_bitcast<unsigned>(__ix);
1748 const auto __uy = __vector_bitcast<unsigned>(__iy);
1749 return __intrin_bitcast<_V>(_mm_blend_epi16(
1750 __auto_bitcast(__ux << (__uy & 0x0000ffffu)),
1751 __auto_bitcast((__ux & 0xffff0000u) << (__uy >> 16)), 0xaa));
1752 }
1753 else if constexpr (sizeof __ix == 16)
1754 {
1755 using _Float4 = __vector_type_t<float, 4>;
1756 using _Int4 = __vector_type_t<int, 4>;
1757 using _UInt4 = __vector_type_t<unsigned, 4>;
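		// Without a variable 16-bit shift, build 2^__y per element by
		// placing (__y + 127) into the float exponent field
		// (0x3f8 >> 3 == 127), converting that to int, and
		// multiplying with __x.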
1758 const _UInt4 __yu
1759 = reinterpret_cast<_UInt4>(__to_intrin(__y + (0x3f8 >> 3)));
1760 return __x
1761 * __intrin_bitcast<_V>(
1762 __vector_convert<_Int4>(_SimdWrapper<float, 4>(
1763 reinterpret_cast<_Float4>(__yu << 23)))
1764 | (__vector_convert<_Int4>(_SimdWrapper<float, 4>(
1765 reinterpret_cast<_Float4>((__yu >> 16) << 23)))
1766 << 16));
1767 }
1768 else
1769 __assert_unreachable<_Tp>();
1770 }
1771 else if constexpr (sizeof(_Up) == 4 && sizeof __ix == 16
1772 && !__have_avx2)
1773        // latency is suboptimal, but throughput is at full speed
1774 return __intrin_bitcast<_V>(
1775 __vector_bitcast<unsigned>(__ix)
1776 * __vector_convert<__vector_type16_t<int>>(
1777 _SimdWrapper<float, 4>(__vector_bitcast<float>(
1778 (__vector_bitcast<unsigned, 4>(__y) << 23) + 0x3f80'0000))));
1779 else if constexpr (sizeof(_Up) == 8 && sizeof __ix == 16
1780 && !__have_avx2)
1781 {
1782 const auto __lo = _mm_sll_epi64(__ix, __iy);
1783 const auto __hi
1784 = _mm_sll_epi64(__ix, _mm_unpackhi_epi64(__iy, __iy));
1785 if constexpr (__have_sse4_1)
1786 return __vector_bitcast<_Up>(_mm_blend_epi16(__lo, __hi, 0xf0));
1787 else
1788 return __vector_bitcast<_Up>(
1789 _mm_move_sd(__vector_bitcast<double>(__hi),
1790 __vector_bitcast<double>(__lo)));
1791 }
1792 else
1793 return __x << __y;
1794 }
1795#endif // _GLIBCXX_SIMD_NO_SHIFT_OPT
1796
1797 // }}}
1798 // _S_bit_shift_right {{{
1799#ifndef _GLIBCXX_SIMD_NO_SHIFT_OPT
1800 template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
1801 constexpr inline _GLIBCXX_CONST static typename _TVT::type
1802 _S_bit_shift_right(_Tp __xx, int __y)
1803 {
1804 using _V = typename _TVT::type;
1805 using _Up = typename _TVT::value_type;
1806 _V __x = __xx;
1807 [[maybe_unused]] const auto __ix = __to_intrin(__x);
1808 if (__builtin_is_constant_evaluated())
1809 return __x >> __y;
1810 else if (__builtin_constant_p(__y)
1811 && is_unsigned_v<
1812 _Up> && __y >= int(sizeof(_Up) * __CHAR_BIT__))
1813 return _V();
1814 else if constexpr (sizeof(_Up) == 1 && is_unsigned_v<_Up>) //{{{
1815 return __intrin_bitcast<_V>(__vector_bitcast<_UShort>(__ix) >> __y)
1816 & _Up(0xff >> __y);
1817 //}}}
1818 else if constexpr (sizeof(_Up) == 1 && is_signed_v<_Up>) //{{{
1819 return __intrin_bitcast<_V>(
1820 (__vector_bitcast<_UShort>(__vector_bitcast<short>(__ix)
1821 >> (__y + 8))
1822 << 8)
1823 | (__vector_bitcast<_UShort>(
1824 __vector_bitcast<short>(__vector_bitcast<_UShort>(__ix) << 8)
1825 >> __y)
1826 >> 8));
1827 //}}}
1828 // GCC optimizes sizeof == 2, 4, and unsigned 8 as expected
1829 else if constexpr (sizeof(_Up) == 8 && is_signed_v<_Up>) //{{{
1830 {
1831 if (__y > 32)
1832 return (__intrin_bitcast<_V>(__vector_bitcast<int>(__ix) >> 32)
1833 & _Up(0xffff'ffff'0000'0000ull))
1834 | __vector_bitcast<_Up>(
1835 __vector_bitcast<int>(__vector_bitcast<_ULLong>(__ix)
1836 >> 32)
1837 >> (__y - 32));
1838 else
1839 return __intrin_bitcast<_V>(__vector_bitcast<_ULLong>(__ix)
1840 >> __y)
1841 | __vector_bitcast<_Up>(
1842 __vector_bitcast<int>(__ix & -0x8000'0000'0000'0000ll)
1843 >> __y);
1844 }
1845 //}}}
1846 else
1847 return __x >> __y;
1848 }
1849
1850 template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
1851 constexpr inline _GLIBCXX_CONST static typename _TVT::type
1852 _S_bit_shift_right(_Tp __xx, typename _TVT::type __y)
1853 {
1854 using _V = typename _TVT::type;
1855 using _Up = typename _TVT::value_type;
1856 _V __x = __xx;
1857 [[maybe_unused]] const auto __ix = __to_intrin(__x);
1858 [[maybe_unused]] const auto __iy = __to_intrin(__y);
1859 if (__builtin_is_constant_evaluated()
1860 || (__builtin_constant_p(__x) && __builtin_constant_p(__y)))
1861 return __x >> __y;
1862 else if constexpr (sizeof(_Up) == 1) //{{{
1863 {
1864 if constexpr (sizeof(__x) <= 8 && __have_avx512bw_vl)
1865 return __intrin_bitcast<_V>(_mm_cvtepi16_epi8(
1866 is_signed_v<_Up> ? _mm_srav_epi16(_mm_cvtepi8_epi16(__ix),
1867 _mm_cvtepi8_epi16(__iy))
1868 : _mm_srlv_epi16(_mm_cvtepu8_epi16(__ix),
1869 _mm_cvtepu8_epi16(__iy))));
1870 if constexpr (sizeof(__x) == 16 && __have_avx512bw_vl)
1871 return __intrin_bitcast<_V>(_mm256_cvtepi16_epi8(
1872 is_signed_v<_Up>
1873 ? _mm256_srav_epi16(_mm256_cvtepi8_epi16(__ix),
1874 _mm256_cvtepi8_epi16(__iy))
1875 : _mm256_srlv_epi16(_mm256_cvtepu8_epi16(__ix),
1876 _mm256_cvtepu8_epi16(__iy))));
1877 else if constexpr (sizeof(__x) == 32 && __have_avx512bw)
1878 return __vector_bitcast<_Up>(_mm512_cvtepi16_epi8(
1879 is_signed_v<_Up>
1880 ? _mm512_srav_epi16(_mm512_cvtepi8_epi16(__ix),
1881 _mm512_cvtepi8_epi16(__iy))
1882 : _mm512_srlv_epi16(_mm512_cvtepu8_epi16(__ix),
1883 _mm512_cvtepu8_epi16(__iy))));
1884 else if constexpr (sizeof(__x) == 64 && is_signed_v<_Up>)
1885 return __vector_bitcast<_Up>(_mm512_mask_mov_epi8(
1886 _mm512_srav_epi16(__ix, _mm512_srli_epi16(__iy, 8)),
1887 0x5555'5555'5555'5555ull,
1888 _mm512_srav_epi16(
1889 _mm512_slli_epi16(__ix, 8),
1890 _mm512_maskz_add_epi8(0x5555'5555'5555'5555ull, __iy,
1891 _mm512_set1_epi16(8)))));
1892 else if constexpr (sizeof(__x) == 64 && is_unsigned_v<_Up>)
1893 return __vector_bitcast<_Up>(_mm512_mask_mov_epi8(
1894 _mm512_srlv_epi16(__ix, _mm512_srli_epi16(__iy, 8)),
1895 0x5555'5555'5555'5555ull,
1896 _mm512_srlv_epi16(
1897 _mm512_maskz_mov_epi8(0x5555'5555'5555'5555ull, __ix),
1898 _mm512_maskz_mov_epi8(0x5555'5555'5555'5555ull, __iy))));
1899 /* This has better throughput but higher latency than the impl below
1900 else if constexpr (__have_avx2 && sizeof(__x) == 16 &&
1901 is_unsigned_v<_Up>)
1902 {
1903 const auto __shorts = __to_intrin(_S_bit_shift_right(
1904 __vector_bitcast<_UShort>(_mm256_cvtepu8_epi16(__ix)),
1905 __vector_bitcast<_UShort>(_mm256_cvtepu8_epi16(__iy))));
1906 return __vector_bitcast<_Up>(
1907 _mm_packus_epi16(__lo128(__shorts), __hi128(__shorts)));
1908 }
1909 */
1910 else if constexpr (__have_avx2 && sizeof(__x) > 8)
1911 // the following uses vpsr[al]vd, which requires AVX2
1912 if constexpr (is_signed_v<_Up>)
1913 {
1914 const auto r3 = __vector_bitcast<_UInt>(
1915 (__vector_bitcast<int>(__x)
1916 >> (__vector_bitcast<_UInt>(__y) >> 24)))
1917 & 0xff000000u;
1918 const auto r2
1919 = __vector_bitcast<_UInt>(
1920 ((__vector_bitcast<int>(__x) << 8)
1921 >> ((__vector_bitcast<_UInt>(__y) << 8) >> 24)))
1922 & 0xff000000u;
1923 const auto r1
1924 = __vector_bitcast<_UInt>(
1925 ((__vector_bitcast<int>(__x) << 16)
1926 >> ((__vector_bitcast<_UInt>(__y) << 16) >> 24)))
1927 & 0xff000000u;
1928 const auto r0 = __vector_bitcast<_UInt>(
1929 (__vector_bitcast<int>(__x) << 24)
1930 >> ((__vector_bitcast<_UInt>(__y) << 24) >> 24));
1931 return __vector_bitcast<_Up>(r3 | (r2 >> 8) | (r1 >> 16)
1932 | (r0 >> 24));
1933 }
1934 else
1935 {
1936 const auto r3 = (__vector_bitcast<_UInt>(__x)
1937 >> (__vector_bitcast<_UInt>(__y) >> 24))
1938 & 0xff000000u;
1939 const auto r2
1940 = ((__vector_bitcast<_UInt>(__x) << 8)
1941 >> ((__vector_bitcast<_UInt>(__y) << 8) >> 24))
1942 & 0xff000000u;
1943 const auto r1
1944 = ((__vector_bitcast<_UInt>(__x) << 16)
1945 >> ((__vector_bitcast<_UInt>(__y) << 16) >> 24))
1946 & 0xff000000u;
1947 const auto r0
1948 = (__vector_bitcast<_UInt>(__x) << 24)
1949 >> ((__vector_bitcast<_UInt>(__y) << 24) >> 24);
1950 return __vector_bitcast<_Up>(r3 | (r2 >> 8) | (r1 >> 16)
1951 | (r0 >> 24));
1952 }
1953 else if constexpr (__have_sse4_1
1954 && is_unsigned_v<_Up> && sizeof(__x) > 2)
1955 {
1956 auto __x128 = __vector_bitcast<_Up>(__ix);
1957 auto __mask
1958 = __vector_bitcast<_Up>(__vector_bitcast<_UShort>(__iy) << 5);
1959 auto __x4 = __vector_bitcast<_Up>(
1960 (__vector_bitcast<_UShort>(__x128) >> 4) & _UShort(0xff0f));
1961 __x128 = __vector_bitcast<_Up>(_CommonImplX86::_S_blend_intrin(
1962 __to_intrin(__mask), __to_intrin(__x128), __to_intrin(__x4)));
1963 __mask += __mask;
1964 auto __x2 = __vector_bitcast<_Up>(
1965 (__vector_bitcast<_UShort>(__x128) >> 2) & _UShort(0xff3f));
1966 __x128 = __vector_bitcast<_Up>(_CommonImplX86::_S_blend_intrin(
1967 __to_intrin(__mask), __to_intrin(__x128), __to_intrin(__x2)));
1968 __mask += __mask;
1969 auto __x1 = __vector_bitcast<_Up>(
1970 (__vector_bitcast<_UShort>(__x128) >> 1) & _UShort(0xff7f));
1971 __x128 = __vector_bitcast<_Up>(_CommonImplX86::_S_blend_intrin(
1972 __to_intrin(__mask), __to_intrin(__x128), __to_intrin(__x1)));
1973 return __intrin_bitcast<_V>(
1974 __x128
1975 & ((__vector_bitcast<_Up>(__iy) & char(0xf8))
1976 == 0)); // y > 7 nulls the result
1977 }
1978 else if constexpr (__have_sse4_1
1979 && is_signed_v<_Up> && sizeof(__x) > 2)
1980 {
1981 auto __mask = __vector_bitcast<_UChar>(
1982 __vector_bitcast<_UShort>(__iy) << 5);
1983 auto __maskl = [&]() {
1984 return __to_intrin(__vector_bitcast<_UShort>(__mask) << 8);
1985 };
1986 auto __xh = __vector_bitcast<short>(__ix);
1987 auto __xl = __vector_bitcast<short>(__ix) << 8;
1988 auto __xh4 = __xh >> 4;
1989 auto __xl4 = __xl >> 4;
1990 __xh = __vector_bitcast<short>(_CommonImplX86::_S_blend_intrin(
1991 __to_intrin(__mask), __to_intrin(__xh), __to_intrin(__xh4)));
1992 __xl = __vector_bitcast<short>(
1993 _CommonImplX86::_S_blend_intrin(__maskl(), __to_intrin(__xl),
1994 __to_intrin(__xl4)));
1995 __mask += __mask;
1996 auto __xh2 = __xh >> 2;
1997 auto __xl2 = __xl >> 2;
1998 __xh = __vector_bitcast<short>(_CommonImplX86::_S_blend_intrin(
1999 __to_intrin(__mask), __to_intrin(__xh), __to_intrin(__xh2)));
2000 __xl = __vector_bitcast<short>(
2001 _CommonImplX86::_S_blend_intrin(__maskl(), __to_intrin(__xl),
2002 __to_intrin(__xl2)));
2003 __mask += __mask;
2004 auto __xh1 = __xh >> 1;
2005 auto __xl1 = __xl >> 1;
2006 __xh = __vector_bitcast<short>(_CommonImplX86::_S_blend_intrin(
2007 __to_intrin(__mask), __to_intrin(__xh), __to_intrin(__xh1)));
2008 __xl = __vector_bitcast<short>(
2009 _CommonImplX86::_S_blend_intrin(__maskl(), __to_intrin(__xl),
2010 __to_intrin(__xl1)));
2011 return __intrin_bitcast<_V>(
2012 (__vector_bitcast<_Up>((__xh & short(0xff00)))
2013 | __vector_bitcast<_Up>(__vector_bitcast<_UShort>(__xl)
2014 >> 8))
2015 & ((__vector_bitcast<_Up>(__iy) & char(0xf8))
2016 == 0)); // y > 7 nulls the result
2017 }
2018 else if constexpr (is_unsigned_v<_Up> && sizeof(__x) > 2) // SSE2
2019 {
2020 auto __mask
2021 = __vector_bitcast<_Up>(__vector_bitcast<_UShort>(__y) << 5);
2022 auto __x4 = __vector_bitcast<_Up>(
2023 (__vector_bitcast<_UShort>(__x) >> 4) & _UShort(0xff0f));
2024 __x = __mask > 0x7f ? __x4 : __x;
2025 __mask += __mask;
2026 auto __x2 = __vector_bitcast<_Up>(
2027 (__vector_bitcast<_UShort>(__x) >> 2) & _UShort(0xff3f));
2028 __x = __mask > 0x7f ? __x2 : __x;
2029 __mask += __mask;
2030 auto __x1 = __vector_bitcast<_Up>(
2031 (__vector_bitcast<_UShort>(__x) >> 1) & _UShort(0xff7f));
2032 __x = __mask > 0x7f ? __x1 : __x;
2033 return __x
2034 & ((__y & char(0xf8)) == 0); // y > 7 nulls the result
2035 }
2036 else if constexpr (sizeof(__x) > 2) // signed SSE2
2037 {
2038 static_assert(is_signed_v<_Up>);
2039 auto __maskh = __vector_bitcast<_UShort>(__y) << 5;
2040 auto __maskl = __vector_bitcast<_UShort>(__y) << (5 + 8);
2041 auto __xh = __vector_bitcast<short>(__x);
2042 auto __xl = __vector_bitcast<short>(__x) << 8;
2043 auto __xh4 = __xh >> 4;
2044 auto __xl4 = __xl >> 4;
2045 __xh = __maskh > 0x7fff ? __xh4 : __xh;
2046 __xl = __maskl > 0x7fff ? __xl4 : __xl;
2047 __maskh += __maskh;
2048 __maskl += __maskl;
2049 auto __xh2 = __xh >> 2;
2050 auto __xl2 = __xl >> 2;
2051 __xh = __maskh > 0x7fff ? __xh2 : __xh;
2052 __xl = __maskl > 0x7fff ? __xl2 : __xl;
2053 __maskh += __maskh;
2054 __maskl += __maskl;
2055 auto __xh1 = __xh >> 1;
2056 auto __xl1 = __xl >> 1;
2057 __xh = __maskh > 0x7fff ? __xh1 : __xh;
2058 __xl = __maskl > 0x7fff ? __xl1 : __xl;
2059 __x = __vector_bitcast<_Up>((__xh & short(0xff00)))
2060 | __vector_bitcast<_Up>(__vector_bitcast<_UShort>(__xl)
2061 >> 8);
2062 return __x
2063 & ((__y & char(0xf8)) == 0); // y > 7 nulls the result
2064 }
2065 else
2066 return __x >> __y;
2067 } //}}}
2068 else if constexpr (sizeof(_Up) == 2 && sizeof(__x) >= 4) //{{{
2069 {
2070 [[maybe_unused]] auto __blend_0xaa = [](auto __a, auto __b) {
2071 if constexpr (sizeof(__a) == 16)
2072 return _mm_blend_epi16(__to_intrin(__a), __to_intrin(__b),
2073 0xaa);
2074 else if constexpr (sizeof(__a) == 32)
2075 return _mm256_blend_epi16(__to_intrin(__a), __to_intrin(__b),
2076 0xaa);
2077 else if constexpr (sizeof(__a) == 64)
2078 return _mm512_mask_blend_epi16(0xaaaa'aaaaU, __to_intrin(__a),
2079 __to_intrin(__b));
2080 else
2081 __assert_unreachable<decltype(__a)>();
2082 };
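	    // __blend_0xaa merges two vectors by taking every odd 16-bit lane
	    // from its second argument; the AVX2 fallbacks below use it to
	    // combine the separately shifted even and odd lanes.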
2083 if constexpr (__have_avx512bw_vl && sizeof(_Tp) <= 16)
2084 return __intrin_bitcast<_V>(is_signed_v<_Up>
2085 ? _mm_srav_epi16(__ix, __iy)
2086 : _mm_srlv_epi16(__ix, __iy));
2087 else if constexpr (__have_avx512bw_vl && sizeof(_Tp) == 32)
2088 return __vector_bitcast<_Up>(is_signed_v<_Up>
2089 ? _mm256_srav_epi16(__ix, __iy)
2090 : _mm256_srlv_epi16(__ix, __iy));
2091 else if constexpr (__have_avx512bw && sizeof(_Tp) == 64)
2092 return __vector_bitcast<_Up>(is_signed_v<_Up>
2093 ? _mm512_srav_epi16(__ix, __iy)
2094 : _mm512_srlv_epi16(__ix, __iy));
2095 else if constexpr (__have_avx2 && is_signed_v<_Up>)
2096 return __intrin_bitcast<_V>(
2097 __blend_0xaa(((__vector_bitcast<int>(__ix) << 16)
2098 >> (__vector_bitcast<int>(__iy) & 0xffffu))
2099 >> 16,
2100 __vector_bitcast<int>(__ix)
2101 >> (__vector_bitcast<int>(__iy) >> 16)));
2102 else if constexpr (__have_avx2 && is_unsigned_v<_Up>)
2103 return __intrin_bitcast<_V>(
2104 __blend_0xaa((__vector_bitcast<_UInt>(__ix) & 0xffffu)
2105 >> (__vector_bitcast<_UInt>(__iy) & 0xffffu),
2106 __vector_bitcast<_UInt>(__ix)
2107 >> (__vector_bitcast<_UInt>(__iy) >> 16)));
2108 else if constexpr (__have_sse4_1)
2109 {
2110 auto __mask = __vector_bitcast<_UShort>(__iy);
2111 auto __x128 = __vector_bitcast<_Up>(__ix);
2112 //__mask *= 0x0808;
2113 __mask = (__mask << 3) | (__mask << 11);
2114 // do __x128 = 0 where __y[4] is set
2115 __x128 = __vector_bitcast<_Up>(
2116 _mm_blendv_epi8(__to_intrin(__x128), __m128i(),
2117 __to_intrin(__mask)));
2118              // do __x128 >>= 8 where __y[3] is set
2119 __x128 = __vector_bitcast<_Up>(
2120 _mm_blendv_epi8(__to_intrin(__x128), __to_intrin(__x128 >> 8),
2121 __to_intrin(__mask += __mask)));
2122              // do __x128 >>= 4 where __y[2] is set
2123 __x128 = __vector_bitcast<_Up>(
2124 _mm_blendv_epi8(__to_intrin(__x128), __to_intrin(__x128 >> 4),
2125 __to_intrin(__mask += __mask)));
2126              // do __x128 >>= 2 where __y[1] is set
2127 __x128 = __vector_bitcast<_Up>(
2128 _mm_blendv_epi8(__to_intrin(__x128), __to_intrin(__x128 >> 2),
2129 __to_intrin(__mask += __mask)));
2130              // do __x128 >>= 1 where __y[0] is set
2131 return __intrin_bitcast<_V>(
2132 _mm_blendv_epi8(__to_intrin(__x128), __to_intrin(__x128 >> 1),
2133 __to_intrin(__mask + __mask)));
2134 }
2135 else
2136 {
2137 auto __k = __vector_bitcast<_UShort>(__iy) << 11;
2138 auto __x128 = __vector_bitcast<_Up>(__ix);
2139 auto __mask = [](__vector_type16_t<_UShort> __kk) {
2140 return __vector_bitcast<short>(__kk) < 0;
2141 };
2142 // do __x128 = 0 where __y[4] is set
2143 __x128 = __mask(__k) ? decltype(__x128)() : __x128;
2144              // do __x128 >>= 8 where __y[3] is set
2145 __x128 = __mask(__k += __k) ? __x128 >> 8 : __x128;
2146              // do __x128 >>= 4 where __y[2] is set
2147 __x128 = __mask(__k += __k) ? __x128 >> 4 : __x128;
2148              // do __x128 >>= 2 where __y[1] is set
2149 __x128 = __mask(__k += __k) ? __x128 >> 2 : __x128;
2150              // do __x128 >>= 1 where __y[0] is set
2151 return __intrin_bitcast<_V>(__mask(__k + __k) ? __x128 >> 1
2152 : __x128);
2153 }
2154 } //}}}
2155 else if constexpr (sizeof(_Up) == 4 && !__have_avx2) //{{{
2156 {
2157 if constexpr (is_unsigned_v<_Up>)
2158 {
2159 // x >> y == x * 2^-y == (x * 2^(31-y)) >> 31
2160 const __m128 __factor_f = reinterpret_cast<__m128>(
2161 0x4f00'0000u - (__vector_bitcast<unsigned, 4>(__y) << 23));
2162 const __m128i __factor
2163 = __builtin_constant_p(__factor_f)
2164 ? __to_intrin(
2165 __make_vector<unsigned>(__factor_f[0], __factor_f[1],
2166 __factor_f[2], __factor_f[3]))
2167 : _mm_cvttps_epi32(__factor_f);
2168 const auto __r02
2169 = _mm_srli_epi64(_mm_mul_epu32(__ix, __factor), 31);
2170 const auto __r13 = _mm_mul_epu32(_mm_srli_si128(__ix, 4),
2171 _mm_srli_si128(__factor, 4));
2172 if constexpr (__have_sse4_1)
2173 return __intrin_bitcast<_V>(
2174 _mm_blend_epi16(_mm_slli_epi64(__r13, 1), __r02, 0x33));
2175 else
2176 return __intrin_bitcast<_V>(
2177 __r02 | _mm_slli_si128(_mm_srli_epi64(__r13, 31), 4));
2178 }
2179 else
2180 {
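		// Without AVX2 there is no per-lane variable 32-bit shift;
		// do four shifts, each with one lane's count moved into the
		// low 64 bits (psrad/psrld take their count from there),
		// then merge the four results.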
2181 auto __shift = [](auto __a, auto __b) {
2182 if constexpr (is_signed_v<_Up>)
2183 return _mm_sra_epi32(__a, __b);
2184 else
2185 return _mm_srl_epi32(__a, __b);
2186 };
2187 const auto __r0
2188 = __shift(__ix, _mm_unpacklo_epi32(__iy, __m128i()));
2189 const auto __r1 = __shift(__ix, _mm_srli_epi64(__iy, 32));
2190 const auto __r2
2191 = __shift(__ix, _mm_unpackhi_epi32(__iy, __m128i()));
2192 const auto __r3 = __shift(__ix, _mm_srli_si128(__iy, 12));
2193 if constexpr (__have_sse4_1)
2194 return __intrin_bitcast<_V>(
2195 _mm_blend_epi16(_mm_blend_epi16(__r1, __r0, 0x3),
2196 _mm_blend_epi16(__r3, __r2, 0x30), 0xf0));
2197 else
2198 return __intrin_bitcast<_V>(_mm_unpacklo_epi64(
2199 _mm_unpacklo_epi32(__r0, _mm_srli_si128(__r1, 4)),
2200 _mm_unpackhi_epi32(__r2, _mm_srli_si128(__r3, 4))));
2201 }
2202 } //}}}
2203 else
2204 return __x >> __y;
2205 }
2206#endif // _GLIBCXX_SIMD_NO_SHIFT_OPT
2207
2208 // }}}
2209 // compares {{{
2210 // _S_equal_to {{{
2211 template <typename _Tp, size_t _Np>
2212 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
2213 _S_equal_to(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
2214 {
2215 if constexpr (__is_avx512_abi<_Abi>()) // {{{
2216 {
2217 if (__builtin_is_constant_evaluated()
2218 || (__x._M_is_constprop() && __y._M_is_constprop()))
2219 return _MaskImpl::_S_to_bits(
2220 __as_wrapper<_Np>(__x._M_data == __y._M_data));
2221
2222 constexpr auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
2223 [[maybe_unused]] const auto __xi = __to_intrin(__x);
2224 [[maybe_unused]] const auto __yi = __to_intrin(__y);
2225 if constexpr (is_floating_point_v<_Tp>)
2226 {
2227 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
2228 return _mm512_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_EQ_OQ);
2229 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
2230 return _mm512_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_EQ_OQ);
2231 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
2232 return _mm256_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_EQ_OQ);
2233 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
2234 return _mm256_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_EQ_OQ);
2235 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
2236 return _mm_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_EQ_OQ);
2237 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
2238 return _mm_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_EQ_OQ);
2239 else
2240 __assert_unreachable<_Tp>();
2241 }
2242 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
2243 return _mm512_mask_cmpeq_epi64_mask(__k1, __xi, __yi);
2244 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
2245 return _mm512_mask_cmpeq_epi32_mask(__k1, __xi, __yi);
2246 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 2)
2247 return _mm512_mask_cmpeq_epi16_mask(__k1, __xi, __yi);
2248 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 1)
2249 return _mm512_mask_cmpeq_epi8_mask(__k1, __xi, __yi);
2250 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
2251 return _mm256_mask_cmpeq_epi64_mask(__k1, __xi, __yi);
2252 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
2253 return _mm256_mask_cmpeq_epi32_mask(__k1, __xi, __yi);
2254 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 2)
2255 return _mm256_mask_cmpeq_epi16_mask(__k1, __xi, __yi);
2256 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 1)
2257 return _mm256_mask_cmpeq_epi8_mask(__k1, __xi, __yi);
2258 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
2259 return _mm_mask_cmpeq_epi64_mask(__k1, __xi, __yi);
2260 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
2261 return _mm_mask_cmpeq_epi32_mask(__k1, __xi, __yi);
2262 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 2)
2263 return _mm_mask_cmpeq_epi16_mask(__k1, __xi, __yi);
2264 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 1)
2265 return _mm_mask_cmpeq_epi8_mask(__k1, __xi, __yi);
2266 else
2267 __assert_unreachable<_Tp>();
2268 } // }}}
2269 else if (__builtin_is_constant_evaluated())
2270 return _Base::_S_equal_to(__x, __y);
2271 else if constexpr (sizeof(__x) == 8) // {{{
2272 {
2273 const auto __r128 = __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__x)
2274 == __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__y);
2275 _MaskMember<_Tp> __r64;
2276 __builtin_memcpy(&__r64._M_data, &__r128, sizeof(__r64));
2277 return __r64;
2278 } // }}}
2279 else
2280 return _Base::_S_equal_to(__x, __y);
2281 }
2282
2283 // }}}
2284 // _S_not_equal_to {{{
2285 template <typename _Tp, size_t _Np>
2286 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
2287 _S_not_equal_to(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
2288 {
2289 if constexpr (__is_avx512_abi<_Abi>()) // {{{
2290 {
2291 if (__builtin_is_constant_evaluated()
2292 || (__x._M_is_constprop() && __y._M_is_constprop()))
2293 return _MaskImpl::_S_to_bits(
2294 __as_wrapper<_Np>(__x._M_data != __y._M_data));
2295
2296 constexpr auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
2297 [[maybe_unused]] const auto __xi = __to_intrin(__x);
2298 [[maybe_unused]] const auto __yi = __to_intrin(__y);
2299 if constexpr (is_floating_point_v<_Tp>)
2300 {
2301 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
2302 return _mm512_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_NEQ_UQ);
2303 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
2304 return _mm512_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_NEQ_UQ);
2305 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
2306 return _mm256_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_NEQ_UQ);
2307 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
2308 return _mm256_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_NEQ_UQ);
2309 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
2310 return _mm_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_NEQ_UQ);
2311 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
2312 return _mm_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_NEQ_UQ);
2313 else
2314 __assert_unreachable<_Tp>();
2315 }
2316 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
2317 return ~_mm512_mask_cmpeq_epi64_mask(__k1, __xi, __yi);
2318 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
2319 return ~_mm512_mask_cmpeq_epi32_mask(__k1, __xi, __yi);
2320 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 2)
2321 return ~_mm512_mask_cmpeq_epi16_mask(__k1, __xi, __yi);
2322 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 1)
2323 return ~_mm512_mask_cmpeq_epi8_mask(__k1, __xi, __yi);
2324 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
2325 return ~_mm256_mask_cmpeq_epi64_mask(__k1, __xi, __yi);
2326 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
2327 return ~_mm256_mask_cmpeq_epi32_mask(__k1, __xi, __yi);
2328 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 2)
2329 return ~_mm256_mask_cmpeq_epi16_mask(__k1, __xi, __yi);
2330 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 1)
2331 return ~_mm256_mask_cmpeq_epi8_mask(__k1, __xi, __yi);
2332 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
2333 return ~_mm_mask_cmpeq_epi64_mask(__k1, __xi, __yi);
2334 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
2335 return ~_mm_mask_cmpeq_epi32_mask(__k1, __xi, __yi);
2336 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 2)
2337 return ~_mm_mask_cmpeq_epi16_mask(__k1, __xi, __yi);
2338 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 1)
2339 return ~_mm_mask_cmpeq_epi8_mask(__k1, __xi, __yi);
2340 else
2341 __assert_unreachable<_Tp>();
2342 } // }}}
2343 else if (__builtin_is_constant_evaluated())
2344 return _Base::_S_not_equal_to(__x, __y);
2345 else if constexpr (sizeof(__x) == 8)
2346 {
2347 const auto __r128 = __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__x)
2348 != __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__y);
2349 _MaskMember<_Tp> __r64;
2350 __builtin_memcpy(&__r64._M_data, &__r128, sizeof(__r64));
2351 return __r64;
2352 }
2353 else
2354 return _Base::_S_not_equal_to(__x, __y);
2355 }
2356
2357 // }}}
2358 // _S_less {{{
2359 template <typename _Tp, size_t _Np>
2360 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
2361 _S_less(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
2362 {
2363 if constexpr (__is_avx512_abi<_Abi>()) // {{{
2364 {
2365 if (__builtin_is_constant_evaluated()
2366 || (__x._M_is_constprop() && __y._M_is_constprop()))
2367 return _MaskImpl::_S_to_bits(
2368 __as_wrapper<_Np>(__x._M_data < __y._M_data));
2369
2370 constexpr auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
2371 [[maybe_unused]] const auto __xi = __to_intrin(__x);
2372 [[maybe_unused]] const auto __yi = __to_intrin(__y);
2373 if constexpr (sizeof(__xi) == 64)
2374 {
2375 if constexpr (is_same_v<_Tp, float>)
2376 return _mm512_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LT_OS);
2377 else if constexpr (is_same_v<_Tp, double>)
2378 return _mm512_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LT_OS);
2379 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 1)
2380 return _mm512_mask_cmplt_epi8_mask(__k1, __xi, __yi);
2381 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 2)
2382 return _mm512_mask_cmplt_epi16_mask(__k1, __xi, __yi);
2383 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 4)
2384 return _mm512_mask_cmplt_epi32_mask(__k1, __xi, __yi);
2385 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 8)
2386 return _mm512_mask_cmplt_epi64_mask(__k1, __xi, __yi);
2387 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 1)
2388 return _mm512_mask_cmplt_epu8_mask(__k1, __xi, __yi);
2389 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 2)
2390 return _mm512_mask_cmplt_epu16_mask(__k1, __xi, __yi);
2391 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 4)
2392 return _mm512_mask_cmplt_epu32_mask(__k1, __xi, __yi);
2393 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 8)
2394 return _mm512_mask_cmplt_epu64_mask(__k1, __xi, __yi);
2395 else
2396 __assert_unreachable<_Tp>();
2397 }
2398 else if constexpr (sizeof(__xi) == 32)
2399 {
2400 if constexpr (is_same_v<_Tp, float>)
2401 return _mm256_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LT_OS);
2402 else if constexpr (is_same_v<_Tp, double>)
2403 return _mm256_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LT_OS);
2404 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 1)
2405 return _mm256_mask_cmplt_epi8_mask(__k1, __xi, __yi);
2406 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 2)
2407 return _mm256_mask_cmplt_epi16_mask(__k1, __xi, __yi);
2408 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 4)
2409 return _mm256_mask_cmplt_epi32_mask(__k1, __xi, __yi);
2410 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 8)
2411 return _mm256_mask_cmplt_epi64_mask(__k1, __xi, __yi);
2412 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 1)
2413 return _mm256_mask_cmplt_epu8_mask(__k1, __xi, __yi);
2414 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 2)
2415 return _mm256_mask_cmplt_epu16_mask(__k1, __xi, __yi);
2416 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 4)
2417 return _mm256_mask_cmplt_epu32_mask(__k1, __xi, __yi);
2418 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 8)
2419 return _mm256_mask_cmplt_epu64_mask(__k1, __xi, __yi);
2420 else
2421 __assert_unreachable<_Tp>();
2422 }
2423 else if constexpr (sizeof(__xi) == 16)
2424 {
2425 if constexpr (is_same_v<_Tp, float>)
2426 return _mm_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LT_OS);
2427 else if constexpr (is_same_v<_Tp, double>)
2428 return _mm_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LT_OS);
2429 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 1)
2430 return _mm_mask_cmplt_epi8_mask(__k1, __xi, __yi);
2431 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 2)
2432 return _mm_mask_cmplt_epi16_mask(__k1, __xi, __yi);
2433 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 4)
2434 return _mm_mask_cmplt_epi32_mask(__k1, __xi, __yi);
2435 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 8)
2436 return _mm_mask_cmplt_epi64_mask(__k1, __xi, __yi);
2437 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 1)
2438 return _mm_mask_cmplt_epu8_mask(__k1, __xi, __yi);
2439 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 2)
2440 return _mm_mask_cmplt_epu16_mask(__k1, __xi, __yi);
2441 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 4)
2442 return _mm_mask_cmplt_epu32_mask(__k1, __xi, __yi);
2443 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 8)
2444 return _mm_mask_cmplt_epu64_mask(__k1, __xi, __yi);
2445 else
2446 __assert_unreachable<_Tp>();
2447 }
2448 else
2449 __assert_unreachable<_Tp>();
2450 } // }}}
2451 else if (__builtin_is_constant_evaluated())
2452 return _Base::_S_less(__x, __y);
2453 else if constexpr (sizeof(__x) == 8)
2454 {
2455 const auto __r128 = __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__x)
2456 < __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__y);
2457 _MaskMember<_Tp> __r64;
2458 __builtin_memcpy(&__r64._M_data, &__r128, sizeof(__r64));
2459 return __r64;
2460 }
2461 else
2462 return _Base::_S_less(__x, __y);
2463 }
2464
2465 // }}}
2466 // _S_less_equal {{{
2467 template <typename _Tp, size_t _Np>
2468 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
2469 _S_less_equal(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
2470 {
2471 if constexpr (__is_avx512_abi<_Abi>()) // {{{
2472 {
2473 if (__builtin_is_constant_evaluated()
2474 || (__x._M_is_constprop() && __y._M_is_constprop()))
2475 return _MaskImpl::_S_to_bits(
2476 __as_wrapper<_Np>(__x._M_data <= __y._M_data));
2477
2478 constexpr auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
2479 [[maybe_unused]] const auto __xi = __to_intrin(__x);
2480 [[maybe_unused]] const auto __yi = __to_intrin(__y);
2481 if constexpr (sizeof(__xi) == 64)
2482 {
2483 if constexpr (is_same_v<_Tp, float>)
2484 return _mm512_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LE_OS);
2485 else if constexpr (is_same_v<_Tp, double>)
2486 return _mm512_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LE_OS);
2487 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 1)
2488 return _mm512_mask_cmple_epi8_mask(__k1, __xi, __yi);
2489 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 2)
2490 return _mm512_mask_cmple_epi16_mask(__k1, __xi, __yi);
2491 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 4)
2492 return _mm512_mask_cmple_epi32_mask(__k1, __xi, __yi);
2493 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 8)
2494 return _mm512_mask_cmple_epi64_mask(__k1, __xi, __yi);
2495 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 1)
2496 return _mm512_mask_cmple_epu8_mask(__k1, __xi, __yi);
2497 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 2)
2498 return _mm512_mask_cmple_epu16_mask(__k1, __xi, __yi);
2499 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 4)
2500 return _mm512_mask_cmple_epu32_mask(__k1, __xi, __yi);
2501 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 8)
2502 return _mm512_mask_cmple_epu64_mask(__k1, __xi, __yi);
2503 else
2504 __assert_unreachable<_Tp>();
2505 }
2506 else if constexpr (sizeof(__xi) == 32)
2507 {
2508 if constexpr (is_same_v<_Tp, float>)
2509 return _mm256_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LE_OS);
2510 else if constexpr (is_same_v<_Tp, double>)
2511 return _mm256_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LE_OS);
2512 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 1)
2513 return _mm256_mask_cmple_epi8_mask(__k1, __xi, __yi);
2514 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 2)
2515 return _mm256_mask_cmple_epi16_mask(__k1, __xi, __yi);
2516 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 4)
2517 return _mm256_mask_cmple_epi32_mask(__k1, __xi, __yi);
2518 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 8)
2519 return _mm256_mask_cmple_epi64_mask(__k1, __xi, __yi);
2520 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 1)
2521 return _mm256_mask_cmple_epu8_mask(__k1, __xi, __yi);
2522 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 2)
2523 return _mm256_mask_cmple_epu16_mask(__k1, __xi, __yi);
2524 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 4)
2525 return _mm256_mask_cmple_epu32_mask(__k1, __xi, __yi);
2526 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 8)
2527 return _mm256_mask_cmple_epu64_mask(__k1, __xi, __yi);
2528 else
2529 __assert_unreachable<_Tp>();
2530 }
2531 else if constexpr (sizeof(__xi) == 16)
2532 {
2533 if constexpr (is_same_v<_Tp, float>)
2534 return _mm_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LE_OS);
2535 else if constexpr (is_same_v<_Tp, double>)
2536 return _mm_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LE_OS);
2537 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 1)
2538 return _mm_mask_cmple_epi8_mask(__k1, __xi, __yi);
2539 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 2)
2540 return _mm_mask_cmple_epi16_mask(__k1, __xi, __yi);
2541 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 4)
2542 return _mm_mask_cmple_epi32_mask(__k1, __xi, __yi);
2543 else if constexpr (is_signed_v<_Tp> && sizeof(_Tp) == 8)
2544 return _mm_mask_cmple_epi64_mask(__k1, __xi, __yi);
2545 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 1)
2546 return _mm_mask_cmple_epu8_mask(__k1, __xi, __yi);
2547 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 2)
2548 return _mm_mask_cmple_epu16_mask(__k1, __xi, __yi);
2549 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 4)
2550 return _mm_mask_cmple_epu32_mask(__k1, __xi, __yi);
2551 else if constexpr (is_unsigned_v<_Tp> && sizeof(_Tp) == 8)
2552 return _mm_mask_cmple_epu64_mask(__k1, __xi, __yi);
2553 else
2554 __assert_unreachable<_Tp>();
2555 }
2556 else
2557 __assert_unreachable<_Tp>();
2558 } // }}}
2559 else if (__builtin_is_constant_evaluated())
2560 return _Base::_S_less_equal(__x, __y);
2561 else if constexpr (sizeof(__x) == 8)
2562 {
2563 const auto __r128 = __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__x)
2564 <= __vector_bitcast<_Tp, 16 / sizeof(_Tp)>(__y);
2565 _MaskMember<_Tp> __r64;
2566 __builtin_memcpy(&__r64._M_data, &__r128, sizeof(__r64));
2567 return __r64;
2568 }
2569 else
2570 return _Base::_S_less_equal(__x, __y);
2571 }
2572
2573 // }}} }}}
2574 // negation {{{
2575 template <typename _Tp, size_t _Np>
2576 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
2577 _S_negate(_SimdWrapper<_Tp, _Np> __x) noexcept
2578 {
2579 if constexpr (__is_avx512_abi<_Abi>())
2580 return _S_equal_to(__x, _SimdWrapper<_Tp, _Np>());
2581 else
2582 return _Base::_S_negate(__x);
2583 }
2584
2585 // }}}
2586 // math {{{
2587 using _Base::_S_abs;
2588
2589 // _S_sqrt {{{
2590 template <typename _Tp, size_t _Np>
2591 _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
2592 _S_sqrt(_SimdWrapper<_Tp, _Np> __x)
2593 {
2594 if constexpr (__is_sse_ps<_Tp, _Np>())
2595 return __auto_bitcast(_mm_sqrt_ps(__to_intrin(__x)));
2596 else if constexpr (__is_sse_pd<_Tp, _Np>())
2597 return _mm_sqrt_pd(__x);
2598 else if constexpr (__is_avx_ps<_Tp, _Np>())
2599 return _mm256_sqrt_ps(__x);
2600 else if constexpr (__is_avx_pd<_Tp, _Np>())
2601 return _mm256_sqrt_pd(__x);
2602 else if constexpr (__is_avx512_ps<_Tp, _Np>())
2603 return _mm512_sqrt_ps(__x);
2604 else if constexpr (__is_avx512_pd<_Tp, _Np>())
2605 return _mm512_sqrt_pd(__x);
2606 else
2607 __assert_unreachable<_Tp>();
2608 }
2609
2610 // }}}
2611 // _S_ldexp {{{
2612 template <typename _Tp, size_t _Np>
2613 _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
2614 _S_ldexp(_SimdWrapper<_Tp, _Np> __x,
2615 __fixed_size_storage_t<int, _Np> __exp)
2616 {
2617 if constexpr (__is_avx512_abi<_Abi>())
2618 {
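	    // vscalefps/vscalefpd compute __x * 2^__exp directly (the
	    // exponents are integral here); the maskz variants zero any
	    // padding elements.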
2619 const auto __xi = __to_intrin(__x);
2620 constexpr _SimdConverter<int, simd_abi::fixed_size<_Np>, _Tp, _Abi>
2621 __cvt;
2622 const auto __expi = __to_intrin(__cvt(__exp));
2623 constexpr auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
2624 if constexpr (sizeof(__xi) == 16)
2625 {
2626 if constexpr (sizeof(_Tp) == 8)
2627 return _mm_maskz_scalef_pd(__k1, __xi, __expi);
2628 else
2629 return _mm_maskz_scalef_ps(__k1, __xi, __expi);
2630 }
2631 else if constexpr (sizeof(__xi) == 32)
2632 {
2633 if constexpr (sizeof(_Tp) == 8)
2634 return _mm256_maskz_scalef_pd(__k1, __xi, __expi);
2635 else
2636 return _mm256_maskz_scalef_ps(__k1, __xi, __expi);
2637 }
2638 else
2639 {
2640 static_assert(sizeof(__xi) == 64);
2641 if constexpr (sizeof(_Tp) == 8)
2642 return _mm512_maskz_scalef_pd(__k1, __xi, __expi);
2643 else
2644 return _mm512_maskz_scalef_ps(__k1, __xi, __expi);
2645 }
2646 }
2647 else
2648 return _Base::_S_ldexp(__x, __exp);
2649 }
2650
2651 // }}}
2652 // _S_trunc {{{
2653 template <typename _Tp, size_t _Np>
2654 _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
2655 _S_trunc(_SimdWrapper<_Tp, _Np> __x)
2656 {
2657 if constexpr (__is_avx512_ps<_Tp, _Np>())
2658 return _mm512_roundscale_ps(__x, 0x0b);
2659 else if constexpr (__is_avx512_pd<_Tp, _Np>())
2660 return _mm512_roundscale_pd(__x, 0x0b);
2661 else if constexpr (__is_avx_ps<_Tp, _Np>())
2662 return _mm256_round_ps(__x, 0x3);
2663 else if constexpr (__is_avx_pd<_Tp, _Np>())
2664 return _mm256_round_pd(__x, 0x3);
2665 else if constexpr (__have_sse4_1 && __is_sse_ps<_Tp, _Np>())
2666 return __auto_bitcast(_mm_round_ps(__to_intrin(__x), 0x3));
2667 else if constexpr (__have_sse4_1 && __is_sse_pd<_Tp, _Np>())
2668 return _mm_round_pd(__x, 0x3);
2669 else if constexpr (__is_sse_ps<_Tp, _Np>())
2670 {
2671 auto __truncated
2672 = _mm_cvtepi32_ps(_mm_cvttps_epi32(__to_intrin(__x)));
2673 const auto __no_fractional_values
2674 = __vector_bitcast<int>(__vector_bitcast<_UInt>(__to_intrin(__x))
2675 & 0x7f800000u)
2676 < 0x4b000000; // the exponent is so large that no mantissa bits
2677 // signify fractional values (0x3f8 + 23*8 =
2678 // 0x4b0)
2679 return __no_fractional_values ? __truncated : __to_intrin(__x);
2680 }
2681 else
2682 return _Base::_S_trunc(__x);
2683 }
2684
2685 // }}}
2686 // _S_round {{{
2687 template <typename _Tp, size_t _Np>
2688 _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
2689 _S_round(_SimdWrapper<_Tp, _Np> __x)
2690 {
2691 // Note that _MM_FROUND_TO_NEAREST_INT rounds ties to even, not away
2692 // from zero as required by std::round. Therefore this function is more
2693 // complicated.
2694 using _V = __vector_type_t<_Tp, _Np>;
2695 _V __truncated;
2696 if constexpr (__is_avx512_ps<_Tp, _Np>())
2697 __truncated = _mm512_roundscale_ps(__x._M_data, 0x0b);
2698 else if constexpr (__is_avx512_pd<_Tp, _Np>())
2699 __truncated = _mm512_roundscale_pd(__x._M_data, 0x0b);
2700 else if constexpr (__is_avx_ps<_Tp, _Np>())
2701 __truncated = _mm256_round_ps(__x._M_data,
2702 _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
2703 else if constexpr (__is_avx_pd<_Tp, _Np>())
2704 __truncated = _mm256_round_pd(__x._M_data,
2705 _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
2706 else if constexpr (__have_sse4_1 && __is_sse_ps<_Tp, _Np>())
2707 __truncated = __auto_bitcast(
2708 _mm_round_ps(__to_intrin(__x),
2709 _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
2710 else if constexpr (__have_sse4_1 && __is_sse_pd<_Tp, _Np>())
2711 __truncated
2712 = _mm_round_pd(__x._M_data, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
2713 else if constexpr (__is_sse_ps<_Tp, _Np>())
2714 __truncated = __auto_bitcast(
2715 _mm_cvtepi32_ps(_mm_cvttps_epi32(__to_intrin(__x))));
2716 else
2717 return _Base::_S_round(__x);
2718
2719 // x < 0 => truncated <= 0 && truncated >= x => x - truncated <= 0
2720 // x > 0 => truncated >= 0 && truncated <= x => x - truncated >= 0
2721
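      // Round ties away from zero: add copysign(1, __x) to the truncated
      // value whenever |__x - __truncated| >= 0.5.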
2722 const _V __rounded
2723 = __truncated
2724 + (__and(_S_absmask<_V>, __x._M_data - __truncated) >= _Tp(.5)
2725 ? __or(__and(_S_signmask<_V>, __x._M_data), _V() + 1)
2726 : _V());
2727 if constexpr (__have_sse4_1)
2728 return __rounded;
2729 else // adjust for missing range in cvttps_epi32
2730 return __and(_S_absmask<_V>, __x._M_data) < 0x1p23f ? __rounded
2731 : __x._M_data;
2732 }
2733
2734 // }}}
2735 // _S_nearbyint {{{
2736 template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2737 _GLIBCXX_SIMD_INTRINSIC static _Tp _S_nearbyint(_Tp __x) noexcept
2738 {
2739 if constexpr (_TVT::template _S_is<float, 16>)
2740 return _mm512_roundscale_ps(__x, 0x0c);
2741 else if constexpr (_TVT::template _S_is<double, 8>)
2742 return _mm512_roundscale_pd(__x, 0x0c);
2743 else if constexpr (_TVT::template _S_is<float, 8>)
2744 return _mm256_round_ps(__x,
2745 _MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC);
2746 else if constexpr (_TVT::template _S_is<double, 4>)
2747 return _mm256_round_pd(__x,
2748 _MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC);
2749 else if constexpr (__have_sse4_1 && _TVT::template _S_is<float, 4>)
2750 return _mm_round_ps(__x,
2751 _MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC);
2752 else if constexpr (__have_sse4_1 && _TVT::template _S_is<double, 2>)
2753 return _mm_round_pd(__x,
2754 _MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC);
2755 else
2756 return _Base::_S_nearbyint(__x);
2757 }
2758
2759 // }}}
2760 // _S_rint {{{
2761 template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2762 _GLIBCXX_SIMD_INTRINSIC static _Tp _S_rint(_Tp __x) noexcept
2763 {
2764 if constexpr (_TVT::template _S_is<float, 16>)
2765 return _mm512_roundscale_ps(__x, 0x04);
2766 else if constexpr (_TVT::template _S_is<double, 8>)
2767 return _mm512_roundscale_pd(__x, 0x04);
2768 else if constexpr (_TVT::template _S_is<float, 8>)
2769 return _mm256_round_ps(__x, _MM_FROUND_CUR_DIRECTION);
2770 else if constexpr (_TVT::template _S_is<double, 4>)
2771 return _mm256_round_pd(__x, _MM_FROUND_CUR_DIRECTION);
2772 else if constexpr (__have_sse4_1 && _TVT::template _S_is<float, 4>)
2773 return _mm_round_ps(__x, _MM_FROUND_CUR_DIRECTION);
2774 else if constexpr (__have_sse4_1 && _TVT::template _S_is<double, 2>)
2775 return _mm_round_pd(__x, _MM_FROUND_CUR_DIRECTION);
2776 else
2777 return _Base::_S_rint(__x);
2778 }
2779
2780 // }}}
2781 // _S_floor {{{
2782 template <typename _Tp, size_t _Np>
2783 _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
2784 _S_floor(_SimdWrapper<_Tp, _Np> __x)
2785 {
2786 if constexpr (__is_avx512_ps<_Tp, _Np>())
2787 return _mm512_roundscale_ps(__x, 0x09);
2788 else if constexpr (__is_avx512_pd<_Tp, _Np>())
2789 return _mm512_roundscale_pd(__x, 0x09);
2790 else if constexpr (__is_avx_ps<_Tp, _Np>())
2791 return _mm256_round_ps(__x, 0x1);
2792 else if constexpr (__is_avx_pd<_Tp, _Np>())
2793 return _mm256_round_pd(__x, 0x1);
2794 else if constexpr (__have_sse4_1 && __is_sse_ps<_Tp, _Np>())
2795 return __auto_bitcast(_mm_floor_ps(__to_intrin(__x)));
2796 else if constexpr (__have_sse4_1 && __is_sse_pd<_Tp, _Np>())
2797 return _mm_floor_pd(__x);
2798 else
2799 return _Base::_S_floor(__x);
2800 }
2801
2802 // }}}
2803 // _S_ceil {{{
2804 template <typename _Tp, size_t _Np>
2805 _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
2806 _S_ceil(_SimdWrapper<_Tp, _Np> __x)
2807 {
2808 if constexpr (__is_avx512_ps<_Tp, _Np>())
2809 return _mm512_roundscale_ps(__x, 0x0a);
2810 else if constexpr (__is_avx512_pd<_Tp, _Np>())
2811 return _mm512_roundscale_pd(__x, 0x0a);
2812 else if constexpr (__is_avx_ps<_Tp, _Np>())
2813 return _mm256_round_ps(__x, 0x2);
2814 else if constexpr (__is_avx_pd<_Tp, _Np>())
2815 return _mm256_round_pd(__x, 0x2);
2816 else if constexpr (__have_sse4_1 && __is_sse_ps<_Tp, _Np>())
2817 return __auto_bitcast(_mm_ceil_ps(__to_intrin(__x)));
2818 else if constexpr (__have_sse4_1 && __is_sse_pd<_Tp, _Np>())
2819 return _mm_ceil_pd(__x);
2820 else
2821 return _Base::_S_ceil(__x);
2822 }
2823
2824 // }}}
2825 // _S_signbit {{{
2826 template <typename _Tp, size_t _Np>
2827 _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
2828 _S_signbit(_SimdWrapper<_Tp, _Np> __x)
2829 {
2830 if constexpr (__is_avx512_abi<_Abi>() && __have_avx512dq)
2831 {
2832 if constexpr (sizeof(__x) == 64 && sizeof(_Tp) == 4)
2833 return _mm512_movepi32_mask(
2834 __intrin_bitcast<__m512i>(__x._M_data));
2835 else if constexpr (sizeof(__x) == 64 && sizeof(_Tp) == 8)
2836 return _mm512_movepi64_mask(
2837 __intrin_bitcast<__m512i>(__x._M_data));
2838 else if constexpr (sizeof(__x) == 32 && sizeof(_Tp) == 4)
2839 return _mm256_movepi32_mask(
2840 __intrin_bitcast<__m256i>(__x._M_data));
2841 else if constexpr (sizeof(__x) == 32 && sizeof(_Tp) == 8)
2842 return _mm256_movepi64_mask(
2843 __intrin_bitcast<__m256i>(__x._M_data));
2844 else if constexpr (sizeof(__x) <= 16 && sizeof(_Tp) == 4)
2845 return _mm_movepi32_mask(__intrin_bitcast<__m128i>(__x._M_data));
2846 else if constexpr (sizeof(__x) <= 16 && sizeof(_Tp) == 8)
2847 return _mm_movepi64_mask(__intrin_bitcast<__m128i>(__x._M_data));
2848 }
2849 else if constexpr (__is_avx512_abi<_Abi>())
2850 {
2851 const auto __xi = __to_intrin(__x);
2852 [[maybe_unused]] constexpr auto __k1
2853 = _Abi::template _S_implicit_mask_intrin<_Tp>();
2854 if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
2855 return _mm_movemask_ps(__xi);
2856 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
2857 return _mm_movemask_pd(__xi);
2858 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
2859 return _mm256_movemask_ps(__xi);
2860 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
2861 return _mm256_movemask_pd(__xi);
2862 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
2863 return _mm512_mask_cmplt_epi32_mask(
2864 __k1, __intrin_bitcast<__m512i>(__xi), __m512i());
2865 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
2866 return _mm512_mask_cmplt_epi64_mask(
2867 __k1, __intrin_bitcast<__m512i>(__xi), __m512i());
2868 else
2869 __assert_unreachable<_Tp>();
2870 }
2871 else
2872 return _Base::_S_signbit(__x);
2873 /*{
2874 using _I = __int_for_sizeof_t<_Tp>;
2875 if constexpr (sizeof(__x) == 64)
2876 return _S_less(__vector_bitcast<_I>(__x), _I());
2877 else
2878 {
2879 const auto __xx = __vector_bitcast<_I>(__x._M_data);
2880 [[maybe_unused]] constexpr _I __signmask = __finite_min_v<_I>;
2881 if constexpr ((sizeof(_Tp) == 4 &&
2882 (__have_avx2 || sizeof(__x) == 16)) ||
2883 __have_avx512vl)
2884 {
2885 return __vector_bitcast<_Tp>(__xx >> __digits_v<_I>);
2886 }
2887 else if constexpr ((__have_avx2 ||
2888 (__have_ssse3 && sizeof(__x) == 16)))
2889 {
2890 return __vector_bitcast<_Tp>((__xx & __signmask) ==
2891 __signmask);
2892 }
2893 else
2894 { // SSE2/3 or AVX (w/o AVX2)
2895 constexpr auto __one = __vector_broadcast<_Np, _Tp>(1);
2896 return __vector_bitcast<_Tp>(
2897 __vector_bitcast<_Tp>(
2898 (__xx & __signmask) |
2899 __vector_bitcast<_I>(__one)) // -1 or 1
2900 != __one);
2901 }
2902 }
2903 }*/
2904 }
2905
2906 // }}}
2907 // _S_isnonzerovalue_mask {{{
2908    // (isnormal || issubnormal) == (!isinf && !isnan && !iszero)
2909 template <typename _Tp>
2910 _GLIBCXX_SIMD_INTRINSIC static auto _S_isnonzerovalue_mask(_Tp __x)
2911 {
2912 using _Traits = _VectorTraits<_Tp>;
2913 if constexpr (__have_avx512dq_vl)
2914 {
2915 if constexpr (_Traits::template _S_is<
2916 float, 2> || _Traits::template _S_is<float, 4>)
2917 return _knot_mask8(_mm_fpclass_ps_mask(__to_intrin(__x), 0x9f));
2918 else if constexpr (_Traits::template _S_is<float, 8>)
2919 return _knot_mask8(_mm256_fpclass_ps_mask(__x, 0x9f));
2920 else if constexpr (_Traits::template _S_is<float, 16>)
2921 return _knot_mask16(_mm512_fpclass_ps_mask(__x, 0x9f));
2922 else if constexpr (_Traits::template _S_is<double, 2>)
2923 return _knot_mask8(_mm_fpclass_pd_mask(__x, 0x9f));
2924 else if constexpr (_Traits::template _S_is<double, 4>)
2925 return _knot_mask8(_mm256_fpclass_pd_mask(__x, 0x9f));
2926 else if constexpr (_Traits::template _S_is<double, 8>)
2927 return _knot_mask8(_mm512_fpclass_pd_mask(__x, 0x9f));
2928 else
2929 __assert_unreachable<_Tp>();
2930 }
2931 else
2932 {
2933 using _Up = typename _Traits::value_type;
2934 constexpr size_t _Np = _Traits::_S_full_size;
2935 const auto __a = __x * __infinity_v<_Up>; // NaN if __x == 0
2936 const auto __b = __x * _Up(); // NaN if __x == inf
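	  // Both products are non-NaN only if __x is a nonzero, finite,
	  // non-NaN value; the ordered (_CMP_ORD_Q) comparisons below test
	  // exactly that.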
2937 if constexpr (__have_avx512vl && __is_sse_ps<_Up, _Np>())
2938 return _mm_cmp_ps_mask(__to_intrin(__a), __to_intrin(__b),
2939 _CMP_ORD_Q);
2940 else if constexpr (__have_avx512f && __is_sse_ps<_Up, _Np>())
2941 return __mmask8(0xf
2942 & _mm512_cmp_ps_mask(__auto_bitcast(__a),
2943 __auto_bitcast(__b),
2944 _CMP_ORD_Q));
2945 else if constexpr (__have_avx512vl && __is_sse_pd<_Up, _Np>())
2946 return _mm_cmp_pd_mask(__a, __b, _CMP_ORD_Q);
2947 else if constexpr (__have_avx512f && __is_sse_pd<_Up, _Np>())
2948 return __mmask8(0x3
2949 & _mm512_cmp_pd_mask(__auto_bitcast(__a),
2950 __auto_bitcast(__b),
2951 _CMP_ORD_Q));
2952 else if constexpr (__have_avx512vl && __is_avx_ps<_Up, _Np>())
2953 return _mm256_cmp_ps_mask(__a, __b, _CMP_ORD_Q);
2954 else if constexpr (__have_avx512f && __is_avx_ps<_Up, _Np>())
2955 return __mmask8(_mm512_cmp_ps_mask(__auto_bitcast(__a),
2956 __auto_bitcast(__b),
2957 _CMP_ORD_Q));
2958 else if constexpr (__have_avx512vl && __is_avx_pd<_Up, _Np>())
2959 return _mm256_cmp_pd_mask(__a, __b, _CMP_ORD_Q);
2960 else if constexpr (__have_avx512f && __is_avx_pd<_Up, _Np>())
2961 return __mmask8(0xf
2962 & _mm512_cmp_pd_mask(__auto_bitcast(__a),
2963 __auto_bitcast(__b),
2964 _CMP_ORD_Q));
2965 else if constexpr (__is_avx512_ps<_Up, _Np>())
2966 return _mm512_cmp_ps_mask(__a, __b, _CMP_ORD_Q);
2967 else if constexpr (__is_avx512_pd<_Up, _Np>())
2968 return _mm512_cmp_pd_mask(__a, __b, _CMP_ORD_Q);
2969 else
2970 __assert_unreachable<_Tp>();
2971 }
2972 }
2973
2974 // }}}
2975 // _S_isfinite {{{
2976 template <typename _Tp, size_t _Np>
2977 _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
2978 _S_isfinite(_SimdWrapper<_Tp, _Np> __x)
2979 {
2980 static_assert(is_floating_point_v<_Tp>);
2981#if !__FINITE_MATH_ONLY__
2982 if constexpr (__is_avx512_abi<_Abi>() && __have_avx512dq)
2983 {
2984 const auto __xi = __to_intrin(__x);
2985 constexpr auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
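	  // fpclass 0x99 = QNaN | SNaN | +inf | -inf; xor with __k1 inverts the
	  // result within the implicit mask, yielding "finite".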
2986 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
2987 return __k1 ^ _mm512_mask_fpclass_ps_mask(__k1, __xi, 0x99);
2988 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
2989 return __k1 ^ _mm512_mask_fpclass_pd_mask(__k1, __xi, 0x99);
2990 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
2991 return __k1 ^ _mm256_mask_fpclass_ps_mask(__k1, __xi, 0x99);
2992 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
2993 return __k1 ^ _mm256_mask_fpclass_pd_mask(__k1, __xi, 0x99);
2994 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
2995 return __k1 ^ _mm_mask_fpclass_ps_mask(__k1, __xi, 0x99);
2996 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
2997 return __k1 ^ _mm_mask_fpclass_pd_mask(__k1, __xi, 0x99);
2998 }
2999 else if constexpr (__is_avx512_abi<_Abi>())
3000 {
3001 // if all exponent bits are set, __x is either inf or NaN
3002 using _I = __int_for_sizeof_t<_Tp>;
3003 const auto __inf = __vector_bitcast<_I>(
3004 __vector_broadcast<_Np>(__infinity_v<_Tp>));
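	  // finite iff (bits(x) & bits(inf)) < bits(inf), i.e. the exponent
	  // field is not all-ones.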
3005 return _S_less<_I, _Np>(__vector_bitcast<_I>(__x) & __inf, __inf);
3006 }
3007 else
3008#endif
3009 return _Base::_S_isfinite(__x);
3010 }
3011
3012 // }}}
3013 // _S_isinf {{{
3014 template <typename _Tp, size_t _Np>
3015 _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
3016 _S_isinf(_SimdWrapper<_Tp, _Np> __x)
3017 {
3018#if !__FINITE_MATH_ONLY__
3019 if constexpr (__is_avx512_abi<_Abi>() && __have_avx512dq)
3020 {
3021 const auto __xi = __to_intrin(__x);
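	  // fpclass immediate 0x18 = +inf (0x08) | -inf (0x10)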
3022 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
3023 return _mm512_fpclass_ps_mask(__xi, 0x18);
3024 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
3025 return _mm512_fpclass_pd_mask(__xi, 0x18);
3026 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3027 return _mm256_fpclass_ps_mask(__xi, 0x18);
3028 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3029 return _mm256_fpclass_pd_mask(__xi, 0x18);
3030 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3031 return _mm_fpclass_ps_mask(__xi, 0x18);
3032 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3033 return _mm_fpclass_pd_mask(__xi, 0x18);
3034 else
3035 __assert_unreachable<_Tp>();
3036 }
3037 else if constexpr (__have_avx512dq_vl)
3038 {
3039 if constexpr (__is_sse_pd<_Tp, _Np>())
3040 return _mm_movm_epi64(_mm_fpclass_pd_mask(__x, 0x18));
3041 else if constexpr (__is_avx_pd<_Tp, _Np>())
3042 return _mm256_movm_epi64(_mm256_fpclass_pd_mask(__x, 0x18));
3043 else if constexpr (__is_sse_ps<_Tp, _Np>())
3044 return _mm_movm_epi32(
3045 _mm_fpclass_ps_mask(__to_intrin(__x), 0x18));
3046 else if constexpr (__is_avx_ps<_Tp, _Np>())
3047 return _mm256_movm_epi32(_mm256_fpclass_ps_mask(__x, 0x18));
3048 else
3049 __assert_unreachable<_Tp>();
3050 }
3051 else
3052#endif
3053 return _Base::_S_isinf(__x);
3054 }
3055
3056 // }}}
3057 // _S_isnormal {{{
3058 template <typename _Tp, size_t _Np>
3059 _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
3060 _S_isnormal(_SimdWrapper<_Tp, _Np> __x)
3061 {
3062#if __FINITE_MATH_ONLY__
3063 [[maybe_unused]] constexpr int __mode = 0x26;
3064#else
3065 [[maybe_unused]] constexpr int __mode = 0xbf;
3066#endif
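      // __mode selects the fpclass categories that disqualify a value from
      // being normal: 0xbf = QNaN|SNaN|+/-0|+/-inf|denormal. With
      // __FINITE_MATH_ONLY__ inf/NaN cannot occur, so 0x26 = +/-0|denormal.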
3067 if constexpr (__is_avx512_abi<_Abi>() && __have_avx512dq)
3068 {
3069 const auto __xi = __to_intrin(__x);
3070 const auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
3071 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
3072 return __k1 ^ _mm512_mask_fpclass_ps_mask(__k1, __xi, __mode);
3073 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
3074 return __k1 ^ _mm512_mask_fpclass_pd_mask(__k1, __xi, __mode);
3075 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3076 return __k1 ^ _mm256_mask_fpclass_ps_mask(__k1, __xi, __mode);
3077 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3078 return __k1 ^ _mm256_mask_fpclass_pd_mask(__k1, __xi, __mode);
3079 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3080 return __k1 ^ _mm_mask_fpclass_ps_mask(__k1, __xi, __mode);
3081 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3082 return __k1 ^ _mm_mask_fpclass_pd_mask(__k1, __xi, __mode);
3083 else
3084 __assert_unreachable<_Tp>();
3085 }
3086 else if constexpr (__have_avx512dq)
3087 {
3088 if constexpr (__have_avx512vl && __is_sse_ps<_Tp, _Np>())
3089 return _mm_movm_epi32(
3090 _knot_mask8(_mm_fpclass_ps_mask(__to_intrin(__x), __mode)));
3091 else if constexpr (__have_avx512vl && __is_avx_ps<_Tp, _Np>())
3092 return _mm256_movm_epi32(
3093 _knot_mask8(_mm256_fpclass_ps_mask(__x, __mode)));
3094 else if constexpr (__is_avx512_ps<_Tp, _Np>())
3095 return _knot_mask16(_mm512_fpclass_ps_mask(__x, __mode));
3096 else if constexpr (__have_avx512vl && __is_sse_pd<_Tp, _Np>())
3097 return _mm_movm_epi64(
3098 _knot_mask8(_mm_fpclass_pd_mask(__x, __mode)));
3099 else if constexpr (__have_avx512vl && __is_avx_pd<_Tp, _Np>())
3100 return _mm256_movm_epi64(
3101 _knot_mask8(_mm256_fpclass_pd_mask(__x, __mode)));
3102 else if constexpr (__is_avx512_pd<_Tp, _Np>())
3103 return _knot_mask8(_mm512_fpclass_pd_mask(__x, __mode));
3104 else
3105 __assert_unreachable<_Tp>();
3106 }
3107 else if constexpr (__is_avx512_abi<_Abi>())
3108 {
3109 using _I = __int_for_sizeof_t<_Tp>;
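	  // normal iff __norm_min <= |x| < inf; with the sign bit cleared the
	  // integer representation is monotonic in the float value, so the
	  // comparisons can be done on the bit patterns.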
3110 const auto absn = __vector_bitcast<_I>(_S_abs(__x));
3111 const auto minn = __vector_bitcast<_I>(
3112 __vector_broadcast<_Np>(__norm_min_v<_Tp>));
3113#if __FINITE_MATH_ONLY__
3114 return _S_less_equal<_I, _Np>(minn, absn);
3115#else
3116 const auto infn
3117 = __vector_bitcast<_I>(__vector_broadcast<_Np>(__infinity_v<_Tp>));
3118 return __and(_S_less_equal<_I, _Np>(minn, absn),
3119 _S_less<_I, _Np>(absn, infn));
3120#endif
3121 }
3122 else
3123 return _Base::_S_isnormal(__x);
3124 }
3125
3126 // }}}
3127 // _S_isnan {{{
3128 template <typename _Tp, size_t _Np>
3129 _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
3130 _S_isnan(_SimdWrapper<_Tp, _Np> __x)
3131 { return _S_isunordered(__x, __x); }
3132
3133 // }}}
3134 // _S_isunordered {{{
3135 template <typename _Tp, size_t _Np>
3136 _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
3137 _S_isunordered([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x,
3138 [[maybe_unused]] _SimdWrapper<_Tp, _Np> __y)
3139 {
3140#if __FINITE_MATH_ONLY__
3141 return {}; // false
3142#else
3143 const auto __xi = __to_intrin(__x);
3144 const auto __yi = __to_intrin(__y);
3145 if constexpr (__is_avx512_abi<_Abi>())
3146 {
3147 constexpr auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
3148 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
3149 return _mm512_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_UNORD_Q);
3150 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
3151 return _mm512_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_UNORD_Q);
3152 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3153 return _mm256_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_UNORD_Q);
3154 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3155 return _mm256_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_UNORD_Q);
3156 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3157 return _mm_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_UNORD_Q);
3158 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3159 return _mm_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_UNORD_Q);
3160 }
3161 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3162 return __to_masktype(_mm256_cmp_ps(__xi, __yi, _CMP_UNORD_Q));
3163 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3164 return __to_masktype(_mm256_cmp_pd(__xi, __yi, _CMP_UNORD_Q));
3165 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3166 return __auto_bitcast(_mm_cmpunord_ps(__xi, __yi));
3167 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3168 return __to_masktype(_mm_cmpunord_pd(__xi, __yi));
3169 else
3170 __assert_unreachable<_Tp>();
3171#endif
3172 }
3173
3174 // }}}
3175 // _S_isgreater {{{
3176 template <typename _Tp, size_t _Np>
3177 static constexpr _MaskMember<_Tp> _S_isgreater(_SimdWrapper<_Tp, _Np> __x,
3178 _SimdWrapper<_Tp, _Np> __y)
3179 {
3180 const auto __xi = __to_intrin(__x);
3181 const auto __yi = __to_intrin(__y);
3182 if constexpr (__is_avx512_abi<_Abi>())
3183 {
3184 const auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
3185 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
3186 return _mm512_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_GT_OQ);
3187 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
3188 return _mm512_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_GT_OQ);
3189 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3190 return _mm256_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_GT_OQ);
3191 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3192 return _mm256_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_GT_OQ);
3193 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3194 return _mm_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_GT_OQ);
3195 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3196 return _mm_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_GT_OQ);
3197 else
3198 __assert_unreachable<_Tp>();
3199 }
3200 else if constexpr (__have_avx)
3201 {
3202 if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3203 return __to_masktype(_mm256_cmp_ps(__xi, __yi, _CMP_GT_OQ));
3204 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3205 return __to_masktype(_mm256_cmp_pd(__xi, __yi, _CMP_GT_OQ));
3206 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3207 return __auto_bitcast(_mm_cmp_ps(__xi, __yi, _CMP_GT_OQ));
3208 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3209 return __to_masktype(_mm_cmp_pd(__xi, __yi, _CMP_GT_OQ));
3210 else
3211 __assert_unreachable<_Tp>();
3212 }
3213 else if constexpr (__have_sse2 && sizeof(__xi) == 16
3214 && sizeof(_Tp) == 4)
3215 {
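	  // Compare as integers: negative floats are mapped from sign-magnitude
	  // to two's complement (negate the magnitude) so that integer order
	  // matches float order; _mm_cmpord_ps filters out NaNs.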
3216 const auto __xn = __vector_bitcast<int>(__xi);
3217 const auto __yn = __vector_bitcast<int>(__yi);
3218 const auto __xp = __xn < 0 ? -(__xn & 0x7fff'ffff) : __xn;
3219 const auto __yp = __yn < 0 ? -(__yn & 0x7fff'ffff) : __yn;
3220 return __auto_bitcast(
3221 __and(__to_masktype(_mm_cmpord_ps(__xi, __yi)), __xp > __yp));
3222 }
3223 else if constexpr (__have_sse2 && sizeof(__xi) == 16
3224 && sizeof(_Tp) == 8)
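	  // _mm_ucomigt_sd is a quiet scalar compare returning 0 or 1 (0 if
	  // unordered); negating yields the 0/-1 mask for each 64-bit lane.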
3225 return __vector_type_t<__int_with_sizeof_t<8>, 2>{
3226 -_mm_ucomigt_sd(__xi, __yi),
3227 -_mm_ucomigt_sd(_mm_unpackhi_pd(__xi, __xi),
3228 _mm_unpackhi_pd(__yi, __yi))};
3229 else
3230 return _Base::_S_isgreater(__x, __y);
3231 }
3232
3233 // }}}
3234 // _S_isgreaterequal {{{
3235 template <typename _Tp, size_t _Np>
3236 static constexpr _MaskMember<_Tp>
3237 _S_isgreaterequal(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
3238 {
3239 const auto __xi = __to_intrin(__x);
3240 const auto __yi = __to_intrin(__y);
3241 if constexpr (__is_avx512_abi<_Abi>())
3242 {
3243 const auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
3244 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
3245 return _mm512_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_GE_OQ);
3246 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
3247 return _mm512_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_GE_OQ);
3248 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3249 return _mm256_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_GE_OQ);
3250 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3251 return _mm256_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_GE_OQ);
3252 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3253 return _mm_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_GE_OQ);
3254 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3255 return _mm_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_GE_OQ);
3256 else
3257 __assert_unreachable<_Tp>();
3258 }
3259 else if constexpr (__have_avx)
3260 {
3261 if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3262 return __to_masktype(_mm256_cmp_ps(__xi, __yi, _CMP_GE_OQ));
3263 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3264 return __to_masktype(_mm256_cmp_pd(__xi, __yi, _CMP_GE_OQ));
3265 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3266 return __auto_bitcast(_mm_cmp_ps(__xi, __yi, _CMP_GE_OQ));
3267 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3268 return __to_masktype(_mm_cmp_pd(__xi, __yi, _CMP_GE_OQ));
3269 else
3270 __assert_unreachable<_Tp>();
3271 }
3272 else if constexpr (__have_sse2 && sizeof(__xi) == 16
3273 && sizeof(_Tp) == 4)
3274 {
3275 const auto __xn = __vector_bitcast<int>(__xi);
3276 const auto __yn = __vector_bitcast<int>(__yi);
3277 const auto __xp = __xn < 0 ? -(__xn & 0x7fff'ffff) : __xn;
3278 const auto __yp = __yn < 0 ? -(__yn & 0x7fff'ffff) : __yn;
3279 return __auto_bitcast(
3280 __and(__to_masktype(_mm_cmpord_ps(__xi, __yi)), __xp >= __yp));
3281 }
3282 else if constexpr (__have_sse2 && sizeof(__xi) == 16
3283 && sizeof(_Tp) == 8)
3284 return __vector_type_t<__int_with_sizeof_t<8>, 2>{
3285 -_mm_ucomige_sd(__xi, __yi),
3286 -_mm_ucomige_sd(_mm_unpackhi_pd(__xi, __xi),
3287 _mm_unpackhi_pd(__yi, __yi))};
3288 else
3289 return _Base::_S_isgreaterequal(__x, __y);
3290 }
3291
3292 // }}}
3293 // _S_isless {{{
3294 template <typename _Tp, size_t _Np>
3295 static constexpr _MaskMember<_Tp> _S_isless(_SimdWrapper<_Tp, _Np> __x,
3296 _SimdWrapper<_Tp, _Np> __y)
3297 {
3298 const auto __xi = __to_intrin(__x);
3299 const auto __yi = __to_intrin(__y);
3300 if constexpr (__is_avx512_abi<_Abi>())
3301 {
3302 const auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
3303 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
3304 return _mm512_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LT_OQ);
3305 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
3306 return _mm512_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LT_OQ);
3307 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3308 return _mm256_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LT_OQ);
3309 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3310 return _mm256_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LT_OQ);
3311 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3312 return _mm_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LT_OQ);
3313 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3314 return _mm_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LT_OQ);
3315 else
3316 __assert_unreachable<_Tp>();
3317 }
3318 else if constexpr (__have_avx)
3319 {
3320 if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3321 return __to_masktype(_mm256_cmp_ps(__xi, __yi, _CMP_LT_OQ));
3322 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3323 return __to_masktype(_mm256_cmp_pd(__xi, __yi, _CMP_LT_OQ));
3324 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3325 return __auto_bitcast(_mm_cmp_ps(__xi, __yi, _CMP_LT_OQ));
3326 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3327 return __to_masktype(_mm_cmp_pd(__xi, __yi, _CMP_LT_OQ));
3328 else
3329 __assert_unreachable<_Tp>();
3330 }
3331 else if constexpr (__have_sse2 && sizeof(__xi) == 16
3332 && sizeof(_Tp) == 4)
3333 {
3334 const auto __xn = __vector_bitcast<int>(__xi);
3335 const auto __yn = __vector_bitcast<int>(__yi);
3336 const auto __xp = __xn < 0 ? -(__xn & 0x7fff'ffff) : __xn;
3337 const auto __yp = __yn < 0 ? -(__yn & 0x7fff'ffff) : __yn;
3338 return __auto_bitcast(
3339 __and(__to_masktype(_mm_cmpord_ps(__xi, __yi)), __xp < __yp));
3340 }
3341 else if constexpr (__have_sse2 && sizeof(__xi) == 16
3342 && sizeof(_Tp) == 8)
3343 return __vector_type_t<__int_with_sizeof_t<8>, 2>{
3344 -_mm_ucomigt_sd(__yi, __xi),
3345 -_mm_ucomigt_sd(_mm_unpackhi_pd(__yi, __yi),
3346 _mm_unpackhi_pd(__xi, __xi))};
3347 else
3348 return _Base::_S_isless(__x, __y);
3349 }
3350
3351 // }}}
3352 // _S_islessequal {{{
3353 template <typename _Tp, size_t _Np>
3354 static constexpr _MaskMember<_Tp>
3355 _S_islessequal(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
3356 {
3357 const auto __xi = __to_intrin(__x);
3358 const auto __yi = __to_intrin(__y);
3359 if constexpr (__is_avx512_abi<_Abi>())
3360 {
3361 const auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
3362 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
3363 return _mm512_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LE_OQ);
3364 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
3365 return _mm512_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LE_OQ);
3366 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3367 return _mm256_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LE_OQ);
3368 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3369 return _mm256_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LE_OQ);
3370 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3371 return _mm_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_LE_OQ);
3372 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3373 return _mm_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_LE_OQ);
3374 else
3375 __assert_unreachable<_Tp>();
3376 }
3377 else if constexpr (__have_avx)
3378 {
3379 if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3380 return __to_masktype(_mm256_cmp_ps(__xi, __yi, _CMP_LE_OQ));
3381 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3382 return __to_masktype(_mm256_cmp_pd(__xi, __yi, _CMP_LE_OQ));
3383 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3384 return __auto_bitcast(_mm_cmp_ps(__xi, __yi, _CMP_LE_OQ));
3385 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3386 return __to_masktype(_mm_cmp_pd(__xi, __yi, _CMP_LE_OQ));
3387 else
3388 __assert_unreachable<_Tp>();
3389 }
3390 else if constexpr (__have_sse2 && sizeof(__xi) == 16
3391 && sizeof(_Tp) == 4)
3392 {
3393 const auto __xn = __vector_bitcast<int>(__xi);
3394 const auto __yn = __vector_bitcast<int>(__yi);
3395 const auto __xp = __xn < 0 ? -(__xn & 0x7fff'ffff) : __xn;
3396 const auto __yp = __yn < 0 ? -(__yn & 0x7fff'ffff) : __yn;
3397 return __auto_bitcast(
3398 __and(__to_masktype(_mm_cmpord_ps(__xi, __yi)), __xp <= __yp));
3399 }
3400 else if constexpr (__have_sse2 && sizeof(__xi) == 16
3401 && sizeof(_Tp) == 8)
3402 return __vector_type_t<__int_with_sizeof_t<8>, 2>{
3403 -_mm_ucomige_sd(__yi, __xi),
3404 -_mm_ucomige_sd(_mm_unpackhi_pd(__yi, __yi),
3405 _mm_unpackhi_pd(__xi, __xi))};
3406 else
3407 return _Base::_S_islessequal(__x, __y);
3408 }
3409
3410 // }}}
3411 // _S_islessgreater {{{
3412 template <typename _Tp, size_t _Np>
3413 static constexpr _MaskMember<_Tp>
3414 _S_islessgreater(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
3415 {
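      // islessgreater == ordered && not equal, which is exactly _CMP_NEQ_OQ.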
3416 const auto __xi = __to_intrin(__x);
3417 const auto __yi = __to_intrin(__y);
3418 if constexpr (__is_avx512_abi<_Abi>())
3419 {
3420 const auto __k1 = _Abi::template _S_implicit_mask_intrin<_Tp>();
3421 if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 4)
3422 return _mm512_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_NEQ_OQ);
3423 else if constexpr (sizeof(__xi) == 64 && sizeof(_Tp) == 8)
3424 return _mm512_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_NEQ_OQ);
3425 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3426 return _mm256_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_NEQ_OQ);
3427 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3428 return _mm256_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_NEQ_OQ);
3429 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3430 return _mm_mask_cmp_ps_mask(__k1, __xi, __yi, _CMP_NEQ_OQ);
3431 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3432 return _mm_mask_cmp_pd_mask(__k1, __xi, __yi, _CMP_NEQ_OQ);
3433 else
3434 __assert_unreachable<_Tp>();
3435 }
3436 else if constexpr (__have_avx)
3437 {
3438 if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 4)
3439 return __to_masktype(_mm256_cmp_ps(__xi, __yi, _CMP_NEQ_OQ));
3440 else if constexpr (sizeof(__xi) == 32 && sizeof(_Tp) == 8)
3441 return __to_masktype(_mm256_cmp_pd(__xi, __yi, _CMP_NEQ_OQ));
3442 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3443 return __auto_bitcast(_mm_cmp_ps(__xi, __yi, _CMP_NEQ_OQ));
3444 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3445 return __to_masktype(_mm_cmp_pd(__xi, __yi, _CMP_NEQ_OQ));
3446 else
3447 __assert_unreachable<_Tp>();
3448 }
3449 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 4)
3450 return __auto_bitcast(
3451 __and(_mm_cmpord_ps(__xi, __yi), _mm_cmpneq_ps(__xi, __yi)));
3452 else if constexpr (sizeof(__xi) == 16 && sizeof(_Tp) == 8)
3453 return __to_masktype(
3454 __and(_mm_cmpord_pd(__xi, __yi), _mm_cmpneq_pd(__xi, __yi)));
3455 else
3456 __assert_unreachable<_Tp>();
3457 }
3458
3459 //}}} }}}
3460 };
3461
3462// }}}
3463// _MaskImplX86Mixin {{{
3464struct _MaskImplX86Mixin
3465{
3466 template <typename _Tp>
3467 using _TypeTag = _Tp*;
3468
3469 using _Base = _MaskImplBuiltinMixin;
3470
3471 // _S_to_maskvector(bool) {{{
3472 template <typename _Up, size_t _ToN = 1, typename _Tp>
3473 _GLIBCXX_SIMD_INTRINSIC static constexpr enable_if_t<
3474 is_same_v<_Tp, bool>, _SimdWrapper<_Up, _ToN>>
3475 _S_to_maskvector(_Tp __x)
3476 {
3477 static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
3478 return __x ? __vector_type_t<_Up, _ToN>{~_Up()}
3479 : __vector_type_t<_Up, _ToN>();
3480 }
3481
3482 // }}}
3483 // _S_to_maskvector(_SanitizedBitMask) {{{
3484 template <typename _Up, size_t _UpN = 0, size_t _Np,
3485 size_t _ToN = _UpN == 0 ? _Np : _UpN>
3486 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
3487 _S_to_maskvector(_SanitizedBitMask<_Np> __x)
3488 {
3489 static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
3490 using _UV = __vector_type_t<_Up, _ToN>;
3491 using _UI = __intrinsic_type_t<_Up, _ToN>;
3492 [[maybe_unused]] const auto __k = __x._M_to_bits();
3493 if constexpr (_Np == 1)
3494 return _S_to_maskvector<_Up, _ToN>(__k);
3495 else if (__x._M_is_constprop() || __builtin_is_constant_evaluated())
3496 return __generate_from_n_evaluations<std::min(_ToN, _Np), _UV>(
3497 [&](auto __i) -> _Up { return -__x[__i.value]; });
3498 else if constexpr (sizeof(_Up) == 1)
3499 {
3500 if constexpr (sizeof(_UI) == 16)
3501 {
3502 if constexpr (__have_avx512bw_vl)
3503 return __intrin_bitcast<_UV>(_mm_movm_epi8(__k));
3504 else if constexpr (__have_avx512bw)
3505 return __intrin_bitcast<_UV>(__lo128(_mm512_movm_epi8(__k)));
3506 else if constexpr (__have_avx512f)
3507 {
3508 auto __as32bits = _mm512_maskz_mov_epi32(__k, ~__m512i());
3509 auto __as16bits
3510 = __xzyw(_mm256_packs_epi32(__lo256(__as32bits),
3511 __hi256(__as32bits)));
3512 return __intrin_bitcast<_UV>(
3513 _mm_packs_epi16(__lo128(__as16bits), __hi128(__as16bits)));
3514 }
3515 else if constexpr (__have_ssse3)
3516 {
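		  // pshufb broadcasts byte 0 of __k to lanes 0..7 and byte 1 to
		  // lanes 8..15; ANDing with the per-lane bit and comparing
		  // against 0 turns each mask bit into a 0/-1 byte.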
3517 const auto __bitmask = __to_intrin(
3518 __make_vector<_UChar>(1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4,
3519 8, 16, 32, 64, 128));
3520 return __intrin_bitcast<_UV>(
3521 __vector_bitcast<_Up>(
3522 _mm_shuffle_epi8(__to_intrin(
3523 __vector_type_t<_ULLong, 2>{__k}),
3524 _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 1,
3525 1, 1, 1, 1, 1, 1, 1))
3526 & __bitmask)
3527 != 0);
3528 }
3529 // else fall through
3530 }
3531 else if constexpr (sizeof(_UI) == 32)
3532 {
3533 if constexpr (__have_avx512bw_vl)
3534 return __vector_bitcast<_Up>(_mm256_movm_epi8(__k));
3535 else if constexpr (__have_avx512bw)
3536 return __vector_bitcast<_Up>(__lo256(_mm512_movm_epi8(__k)));
3537 else if constexpr (__have_avx512f)
3538 {
3539 auto __as16bits = // 0 16 1 17 ... 15 31
3540 _mm512_srli_epi32(_mm512_maskz_mov_epi32(__k, ~__m512i()),
3541 16)
3542 | _mm512_slli_epi32(_mm512_maskz_mov_epi32(__k >> 16,
3543 ~__m512i()),
3544 16);
3545 auto __0_16_1_17 = __xzyw(_mm256_packs_epi16(
3546 __lo256(__as16bits),
3547 __hi256(__as16bits)) // 0 16 1 17 2 18 3 19 8 24 9 25 ...
3548 );
3549 // deinterleave:
3550 return __vector_bitcast<_Up>(__xzyw(_mm256_shuffle_epi8(
3551 __0_16_1_17, // 0 16 1 17 2 ...
3552 _mm256_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9,
3553 11, 13, 15, 0, 2, 4, 6, 8, 10, 12, 14, 1,
3554 3, 5, 7, 9, 11, 13,
3555 15)))); // 0-7 16-23 8-15 24-31 -> xzyw
3556 // 0-3 8-11 16-19 24-27
3557 // 4-7 12-15 20-23 28-31
3558 }
3559 else if constexpr (__have_avx2)
3560 {
3561 const auto __bitmask
3562 = _mm256_broadcastsi128_si256(__to_intrin(
3563 __make_vector<_UChar>(1, 2, 4, 8, 16, 32, 64, 128, 1, 2,
3564 4, 8, 16, 32, 64, 128)));
3565 return __vector_bitcast<_Up>(
3566 __vector_bitcast<_Up>(
3567 _mm256_shuffle_epi8(
3568 _mm256_broadcastsi128_si256(
3569 __to_intrin(__vector_type_t<_ULLong, 2>{__k})),
3570 _mm256_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
3571 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
3572 3, 3, 3, 3, 3, 3))
3573 & __bitmask)
3574 != 0);
3575 }
3576 // else fall through
3577 }
3578 else if constexpr (sizeof(_UI) == 64)
3579 return reinterpret_cast<_UV>(_mm512_movm_epi8(__k));
3580 if constexpr (std::min(_ToN, _Np) <= 4)
3581 {
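	    // Spread the low mask bits into bytes: multiplying by 0x00204081
	    // (1 | 1<<7 | 1<<14 | 1<<21) places bit i at bit 8*i, the AND with
	    // 0x01010101 keeps only those positions, and *0xff widens each bit
	    // to a full byte. E.g. __x = 0b0101 -> 0x00ff00ff.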
3582 if constexpr (_Np > 7) // avoid overflow
3583 __x &= _SanitizedBitMask<_Np>(0x0f);
3584 const _UInt __char_mask
3585 = ((_UInt(__x.to_ulong()) * 0x00204081U) & 0x01010101ULL)
3586 * 0xff;
3587 _UV __r = {};
3588 __builtin_memcpy(&__r, &__char_mask,
3589 std::min(sizeof(__r), sizeof(__char_mask)));
3590 return __r;
3591 }
3592 else if constexpr (std::min(_ToN, _Np) <= 7)
3593 {
3594 if constexpr (_Np > 7) // avoid overflow
3595 __x &= _SanitizedBitMask<_Np>(0x7f);
3596 const _ULLong __char_mask
3597 = ((__x.to_ulong() * 0x40810204081ULL) & 0x0101010101010101ULL)
3598 * 0xff;
3599 _UV __r = {};
3600 __builtin_memcpy(&__r, &__char_mask,
3601 std::min(sizeof(__r), sizeof(__char_mask)));
3602 return __r;
3603 }
3604 }
3605 else if constexpr (sizeof(_Up) == 2)
3606 {
3607 if constexpr (sizeof(_UI) == 16)
3608 {
3609 if constexpr (__have_avx512bw_vl)
3610 return __intrin_bitcast<_UV>(_mm_movm_epi16(__k));
3611 else if constexpr (__have_avx512bw)
3612 return __intrin_bitcast<_UV>(__lo128(_mm512_movm_epi16(__k)));
3613 else if constexpr (__have_avx512f)
3614 {
3615 __m256i __as32bits = {};
3616 if constexpr (__have_avx512vl)
3617 __as32bits = _mm256_maskz_mov_epi32(__k, ~__m256i());
3618 else
3619 __as32bits
3620 = __lo256(_mm512_maskz_mov_epi32(__k, ~__m512i()));
3621 return __intrin_bitcast<_UV>(
3622 _mm_packs_epi32(__lo128(__as32bits), __hi128(__as32bits)));
3623 }
3624 // else fall through
3625 }
3626 else if constexpr (sizeof(_UI) == 32)
3627 {
3628 if constexpr (__have_avx512bw_vl)
3629 return __vector_bitcast<_Up>(_mm256_movm_epi16(__k));
3630 else if constexpr (__have_avx512bw)
3631 return __vector_bitcast<_Up>(__lo256(_mm512_movm_epi16(__k)));
3632 else if constexpr (__have_avx512f)
3633 {
3634 auto __as32bits = _mm512_maskz_mov_epi32(__k, ~__m512i());
3635 return __vector_bitcast<_Up>(
3636 __xzyw(_mm256_packs_epi32(__lo256(__as32bits),
3637 __hi256(__as32bits))));
3638 }
3639 // else fall through
3640 }
3641 else if constexpr (sizeof(_UI) == 64)
3642 return __vector_bitcast<_Up>(_mm512_movm_epi16(__k));
3643 }
3644 else if constexpr (sizeof(_Up) == 4)
3645 {
3646 if constexpr (sizeof(_UI) == 16)
3647 {
3648 if constexpr (__have_avx512dq_vl)
3649 return __intrin_bitcast<_UV>(_mm_movm_epi32(__k));
3650 else if constexpr (__have_avx512dq)
3651 return __intrin_bitcast<_UV>(__lo128(_mm512_movm_epi32(__k)));
3652 else if constexpr (__have_avx512vl)
3653 return __intrin_bitcast<_UV>(
3654 _mm_maskz_mov_epi32(__k, ~__m128i()));
3655 else if constexpr (__have_avx512f)
3656 return __intrin_bitcast<_UV>(
3657 __lo128(_mm512_maskz_mov_epi32(__k, ~__m512i())));
3658 // else fall through
3659 }
3660 else if constexpr (sizeof(_UI) == 32)
3661 {
3662 if constexpr (__have_avx512dq_vl)
3663 return __vector_bitcast<_Up>(_mm256_movm_epi32(__k));
3664 else if constexpr (__have_avx512dq)
3665 return __vector_bitcast<_Up>(__lo256(_mm512_movm_epi32(__k)));
3666 else if constexpr (__have_avx512vl)
3667 return __vector_bitcast<_Up>(
3668 _mm256_maskz_mov_epi32(__k, ~__m256i()));
3669 else if constexpr (__have_avx512f)
3670 return __vector_bitcast<_Up>(
3671 __lo256(_mm512_maskz_mov_epi32(__k, ~__m512i())));
3672 // else fall through
3673 }
3674 else if constexpr (sizeof(_UI) == 64)
3675 return __vector_bitcast<_Up>(
3676 __have_avx512dq ? _mm512_movm_epi32(__k)
3677 : _mm512_maskz_mov_epi32(__k, ~__m512i()));
3678 }
3679 else if constexpr (sizeof(_Up) == 8)
3680 {
3681 if constexpr (sizeof(_UI) == 16)
3682 {
3683 if constexpr (__have_avx512dq_vl)
3684 return __vector_bitcast<_Up>(_mm_movm_epi64(__k));
3685 else if constexpr (__have_avx512dq)
3686 return __vector_bitcast<_Up>(__lo128(_mm512_movm_epi64(__k)));
3687 else if constexpr (__have_avx512vl)
3688 return __vector_bitcast<_Up>(
3689 _mm_maskz_mov_epi64(__k, ~__m128i()));
3690 else if constexpr (__have_avx512f)
3691 return __vector_bitcast<_Up>(
3692 __lo128(_mm512_maskz_mov_epi64(__k, ~__m512i())));
3693 // else fall through
3694 }
3695 else if constexpr (sizeof(_UI) == 32)
3696 {
3697 if constexpr (__have_avx512dq_vl)
3698 return __vector_bitcast<_Up>(_mm256_movm_epi64(__k));
3699 else if constexpr (__have_avx512dq)
3700 return __vector_bitcast<_Up>(__lo256(_mm512_movm_epi64(__k)));
3701 else if constexpr (__have_avx512vl)
3702 return __vector_bitcast<_Up>(
3703 _mm256_maskz_mov_epi64(__k, ~__m256i()));
3704 else if constexpr (__have_avx512f)
3705 return __vector_bitcast<_Up>(
3706 __lo256(_mm512_maskz_mov_epi64(__k, ~__m512i())));
3707 // else fall through
3708 }
3709 else if constexpr (sizeof(_UI) == 64)
3710 return __vector_bitcast<_Up>(
3711 __have_avx512dq ? _mm512_movm_epi64(__k)
3712 : _mm512_maskz_mov_epi64(__k, ~__m512i()));
3713 }
3714
3715 using _UpUInt = make_unsigned_t<_Up>;
3716 using _V = __vector_type_t<_UpUInt, _ToN>;
3717 constexpr size_t __bits_per_element = sizeof(_Up) * __CHAR_BIT__;
3718 if constexpr (_ToN == 2)
3719 {
3720 return __vector_bitcast<_Up>(_V{_UpUInt(-__x[0]), _UpUInt(-__x[1])});
3721 }
3722 else if constexpr (!__have_avx2 && __have_avx && sizeof(_V) == 32)
3723 {
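	  // Without AVX2 there are no 256-bit integer compares; broadcast __k,
	  // AND with one bit per lane (reinterpreted as float/double bit
	  // patterns), and use an unordered-quiet FP compare against zero to
	  // produce the mask.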
3724 if constexpr (sizeof(_Up) == 4)
3725 return __vector_bitcast<_Up>(_mm256_cmp_ps(
3726 _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(__k)),
3727 _mm256_castsi256_ps(_mm256_setr_epi32(
3728 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80))),
3729 _mm256_setzero_ps(), _CMP_NEQ_UQ));
3730 else if constexpr (sizeof(_Up) == 8)
3731 return __vector_bitcast<_Up>(_mm256_cmp_pd(
3732 _mm256_and_pd(_mm256_castsi256_pd(_mm256_set1_epi64x(__k)),
3733 _mm256_castsi256_pd(
3734 _mm256_setr_epi64x(0x01, 0x02, 0x04, 0x08))),
3735 _mm256_setzero_pd(), _CMP_NEQ_UQ));
3736 else
3737 __assert_unreachable<_Up>();
3738 }
3739 else if constexpr (__bits_per_element >= _ToN)
3740 {
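	  // All mask bits fit into a single element: broadcast __k and test one
	  // bit per lane.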
3741 constexpr auto __bitmask
3742 = __generate_vector<_V>([](auto __i) constexpr->_UpUInt {
3743 return __i < _ToN ? 1ull << __i : 0;
3744 });
3745 const auto __bits
3746 = __vector_broadcast<_ToN, _UpUInt>(__k) & __bitmask;
3747 if constexpr (__bits_per_element > _ToN)
3748 return __vector_bitcast<_Up>(__bits) > 0;
3749 else
3750 return __vector_bitcast<_Up>(__bits != 0);
3751 }
3752 else
3753 {
3754 const _V __tmp
3755 = __generate_vector<_V>([&](auto __i) constexpr {
3756 return static_cast<_UpUInt>(
3757 __k >> (__bits_per_element * (__i / __bits_per_element)));
3758 })
3759 & __generate_vector<_V>([](auto __i) constexpr {
3760 return static_cast<_UpUInt>(1ull
3761 << (__i % __bits_per_element));
3762 }); // mask bit index
3763 return __intrin_bitcast<_UV>(__tmp != _V());
3764 }
3765 }
3766
3767 // }}}
3768 // _S_to_maskvector(_SimdWrapper) {{{
3769 template <typename _Up, size_t _UpN = 0, typename _Tp, size_t _Np,
3770 size_t _ToN = _UpN == 0 ? _Np : _UpN>
3771 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
3772 _S_to_maskvector(_SimdWrapper<_Tp, _Np> __x)
3773 {
3774 static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
3775 using _TW = _SimdWrapper<_Tp, _Np>;
3776 using _UW = _SimdWrapper<_Up, _ToN>;
3777 using _UI = __intrinsic_type_t<_Up, _ToN>;
3778 if constexpr (is_same_v<_Tp, bool>) // bits -> vector
3779 return _S_to_maskvector<_Up, _ToN>(
3780 _BitMask<_Np>(__x._M_data)._M_sanitized());
3781 // vector -> vector bitcast
3782 else if constexpr (sizeof(_Up) == sizeof(_Tp)
3783 && sizeof(_TW) == sizeof(_UW))
3784 return __wrapper_bitcast<_Up, _ToN>(
3785 _ToN <= _Np
3786 ? __x
3787 : simd_abi::_VecBuiltin<sizeof(_Tp) * _Np>::_S_masked(__x));
3788 else // vector -> vector {{{
3789 {
3790 if (__x._M_is_constprop() || __builtin_is_constant_evaluated())
3791 {
3792 const auto __y = __vector_bitcast<__int_for_sizeof_t<_Tp>>(__x);
3793 return __generate_from_n_evaluations<std::min(_ToN, _Np),
3794 __vector_type_t<_Up, _ToN>>(
3795 [&](auto __i) -> _Up { return __y[__i.value]; });
3796 }
3797 using _To = __vector_type_t<_Up, _ToN>;
3798 [[maybe_unused]] constexpr size_t _FromN = _Np;
3799 constexpr int _FromBytes = sizeof(_Tp);
3800 constexpr int _ToBytes = sizeof(_Up);
3801 const auto __k = __x._M_data;
3802
3803 if constexpr (_FromBytes == _ToBytes)
3804 return __intrin_bitcast<_To>(__k);
3805 else if constexpr (sizeof(_UI) == 16 && sizeof(__k) == 16)
3806 { // SSE -> SSE {{{
3807 if constexpr (_FromBytes == 4 && _ToBytes == 8)
3808 return __intrin_bitcast<_To>(__interleave128_lo(__k, __k));
3809 else if constexpr (_FromBytes == 2 && _ToBytes == 8)
3810 {
3811 const auto __y
3812 = __vector_bitcast<int>(__interleave128_lo(__k, __k));
3813 return __intrin_bitcast<_To>(__interleave128_lo(__y, __y));
3814 }
3815 else if constexpr (_FromBytes == 1 && _ToBytes == 8)
3816 {
3817 auto __y
3818 = __vector_bitcast<short>(__interleave128_lo(__k, __k));
3819 auto __z
3820 = __vector_bitcast<int>(__interleave128_lo(__y, __y));
3821 return __intrin_bitcast<_To>(__interleave128_lo(__z, __z));
3822 }
3823 else if constexpr (_FromBytes == 8 && _ToBytes == 4
3824 && __have_sse2)
3825 return __intrin_bitcast<_To>(
3826 _mm_packs_epi32(__vector_bitcast<_LLong>(__k), __m128i()));
3827 else if constexpr (_FromBytes == 8 && _ToBytes == 4)
3828 return __vector_shuffle<1, 3, 6, 7>(__vector_bitcast<_Up>(__k),
3829 _UI());
3830 else if constexpr (_FromBytes == 2 && _ToBytes == 4)
3831 return __intrin_bitcast<_To>(__interleave128_lo(__k, __k));
3832 else if constexpr (_FromBytes == 1 && _ToBytes == 4)
3833 {
3834 const auto __y
3835 = __vector_bitcast<short>(__interleave128_lo(__k, __k));
3836 return __intrin_bitcast<_To>(__interleave128_lo(__y, __y));
3837 }
3838 else if constexpr (_FromBytes == 8 && _ToBytes == 2)
3839 {
3840 if constexpr (__have_sse2 && !__have_ssse3)
3841 return __intrin_bitcast<_To>(_mm_packs_epi32(
3842 _mm_packs_epi32(__vector_bitcast<_LLong>(__k), __m128i()),
3843 __m128i()));
3844 else
3845 return __intrin_bitcast<_To>(
3846 __vector_permute<3, 7, -1, -1, -1, -1, -1, -1>(
3847 __vector_bitcast<_Up>(__k)));
3848 }
3849 else if constexpr (_FromBytes == 4 && _ToBytes == 2)
3850 return __intrin_bitcast<_To>(
3851 _mm_packs_epi32(__vector_bitcast<_LLong>(__k), __m128i()));
3852 else if constexpr (_FromBytes == 1 && _ToBytes == 2)
3853 return __intrin_bitcast<_To>(__interleave128_lo(__k, __k));
3854 else if constexpr (_FromBytes == 8 && _ToBytes == 1
3855 && __have_ssse3)
3856 return __intrin_bitcast<_To>(
3857 _mm_shuffle_epi8(__vector_bitcast<_LLong>(__k),
3858 _mm_setr_epi8(7, 15, -1, -1, -1, -1, -1, -1,
3859 -1, -1, -1, -1, -1, -1, -1,
3860 -1)));
3861 else if constexpr (_FromBytes == 8 && _ToBytes == 1)
3862 {
3863 auto __y
3864 = _mm_packs_epi32(__vector_bitcast<_LLong>(__k), __m128i());
3865 __y = _mm_packs_epi32(__y, __m128i());
3866 return __intrin_bitcast<_To>(_mm_packs_epi16(__y, __m128i()));
3867 }
3868 else if constexpr (_FromBytes == 4 && _ToBytes == 1
3869 && __have_ssse3)
3870 return __intrin_bitcast<_To>(
3871 _mm_shuffle_epi8(__vector_bitcast<_LLong>(__k),
3872 _mm_setr_epi8(3, 7, 11, 15, -1, -1, -1, -1,
3873 -1, -1, -1, -1, -1, -1, -1,
3874 -1)));
3875 else if constexpr (_FromBytes == 4 && _ToBytes == 1)
3876 {
3877 const auto __y
3878 = _mm_packs_epi32(__vector_bitcast<_LLong>(__k), __m128i());
3879 return __intrin_bitcast<_To>(_mm_packs_epi16(__y, __m128i()));
3880 }
3881 else if constexpr (_FromBytes == 2 && _ToBytes == 1)
3882 return __intrin_bitcast<_To>(
3883 _mm_packs_epi16(__vector_bitcast<_LLong>(__k), __m128i()));
3884 else
3885 __assert_unreachable<_Tp>();
3886 } // }}}
3887 else if constexpr (sizeof(_UI) == 32 && sizeof(__k) == 32)
3888 { // AVX -> AVX {{{
3889 if constexpr (_FromBytes == _ToBytes)
3890 __assert_unreachable<_Tp>();
3891 else if constexpr (_FromBytes == _ToBytes * 2)
3892 {
3893 const auto __y = __vector_bitcast<_LLong>(__k);
3894 return __intrin_bitcast<_To>(_mm256_castsi128_si256(
3895 _mm_packs_epi16(__lo128(__y), __hi128(__y))));
3896 }
3897 else if constexpr (_FromBytes == _ToBytes * 4)
3898 {
3899 const auto __y = __vector_bitcast<_LLong>(__k);
3900 return __intrin_bitcast<_To>(_mm256_castsi128_si256(
3901 _mm_packs_epi16(_mm_packs_epi16(__lo128(__y), __hi128(__y)),
3902 __m128i())));
3903 }
3904 else if constexpr (_FromBytes == _ToBytes * 8)
3905 {
3906 const auto __y = __vector_bitcast<_LLong>(__k);
3907 return __intrin_bitcast<_To>(
3908 _mm256_castsi128_si256(_mm_shuffle_epi8(
3909 _mm_packs_epi16(__lo128(__y), __hi128(__y)),
3910 _mm_setr_epi8(3, 7, 11, 15, -1, -1, -1, -1, -1, -1, -1,
3911 -1, -1, -1, -1, -1))));
3912 }
3913 else if constexpr (_FromBytes * 2 == _ToBytes)
3914 {
3915 auto __y = __xzyw(__to_intrin(__k));
3916		  if constexpr (is_floating_point_v<_Tp>
3917				|| (!__have_avx2 && _FromBytes == 4))
3918 {
3919 const auto __yy = __vector_bitcast<float>(__y);
3920 return __intrin_bitcast<_To>(
3921 _mm256_unpacklo_ps(__yy, __yy));
3922 }
3923 else
3924 return __intrin_bitcast<_To>(
3925 _mm256_unpacklo_epi8(__y, __y));
3926 }
3927 else if constexpr (_FromBytes * 4 == _ToBytes)
3928 {
3929 auto __y
3930 = _mm_unpacklo_epi8(__lo128(__vector_bitcast<_LLong>(__k)),
3931 __lo128(__vector_bitcast<_LLong>(
3932 __k))); // drops 3/4 of input
3933 return __intrin_bitcast<_To>(
3934 __concat(_mm_unpacklo_epi16(__y, __y),
3935 _mm_unpackhi_epi16(__y, __y)));
3936 }
3937 else if constexpr (_FromBytes == 1 && _ToBytes == 8)
3938 {
3939 auto __y
3940 = _mm_unpacklo_epi8(__lo128(__vector_bitcast<_LLong>(__k)),
3941 __lo128(__vector_bitcast<_LLong>(
3942 __k))); // drops 3/4 of input
3943 __y
3944 = _mm_unpacklo_epi16(__y,
3945 __y); // drops another 1/2 => 7/8 total
3946 return __intrin_bitcast<_To>(
3947 __concat(_mm_unpacklo_epi32(__y, __y),
3948 _mm_unpackhi_epi32(__y, __y)));
3949 }
3950 else
3951 __assert_unreachable<_Tp>();
3952 } // }}}
3953 else if constexpr (sizeof(_UI) == 32 && sizeof(__k) == 16)
3954 { // SSE -> AVX {{{
3955 if constexpr (_FromBytes == _ToBytes)
3956 return __intrin_bitcast<_To>(
3957 __intrinsic_type_t<_Tp, 32 / sizeof(_Tp)>(
3958 __zero_extend(__to_intrin(__k))));
3959 else if constexpr (_FromBytes * 2 == _ToBytes)
3960 { // keep all
3961 return __intrin_bitcast<_To>(
3962 __concat(_mm_unpacklo_epi8(__vector_bitcast<_LLong>(__k),
3963 __vector_bitcast<_LLong>(__k)),
3964 _mm_unpackhi_epi8(__vector_bitcast<_LLong>(__k),
3965 __vector_bitcast<_LLong>(__k))));
3966 }
3967 else if constexpr (_FromBytes * 4 == _ToBytes)
3968 {
3969 if constexpr (__have_avx2)
3970 {
3971 return __intrin_bitcast<_To>(_mm256_shuffle_epi8(
3972 __concat(__vector_bitcast<_LLong>(__k),
3973 __vector_bitcast<_LLong>(__k)),
3974 _mm256_setr_epi8(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3,
3975 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6,
3976 6, 6, 7, 7, 7, 7)));
3977 }
3978 else
3979 {
3980 return __intrin_bitcast<_To>(__concat(
3981 _mm_shuffle_epi8(__vector_bitcast<_LLong>(__k),
3982 _mm_setr_epi8(0, 0, 0, 0, 1, 1, 1, 1,
3983 2, 2, 2, 2, 3, 3, 3, 3)),
3984 _mm_shuffle_epi8(__vector_bitcast<_LLong>(__k),
3985 _mm_setr_epi8(4, 4, 4, 4, 5, 5, 5, 5,
3986 6, 6, 6, 6, 7, 7, 7,
3987 7))));
3988 }
3989 }
3990 else if constexpr (_FromBytes * 8 == _ToBytes)
3991 {
3992 if constexpr (__have_avx2)
3993 {
3994 return __intrin_bitcast<_To>(_mm256_shuffle_epi8(
3995 __concat(__vector_bitcast<_LLong>(__k),
3996 __vector_bitcast<_LLong>(__k)),
3997 _mm256_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
3998 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
3999 3, 3, 3, 3, 3, 3)));
4000 }
4001 else
4002 {
4003 return __intrin_bitcast<_To>(__concat(
4004 _mm_shuffle_epi8(__vector_bitcast<_LLong>(__k),
4005 _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0,
4006 1, 1, 1, 1, 1, 1, 1, 1)),
4007 _mm_shuffle_epi8(__vector_bitcast<_LLong>(__k),
4008 _mm_setr_epi8(2, 2, 2, 2, 2, 2, 2, 2,
4009 3, 3, 3, 3, 3, 3, 3,
4010 3))));
4011 }
4012 }
4013 else if constexpr (_FromBytes == _ToBytes * 2)
4014 return __intrin_bitcast<_To>(__m256i(__zero_extend(
4015 _mm_packs_epi16(__vector_bitcast<_LLong>(__k), __m128i()))));
4016 else if constexpr (_FromBytes == 8 && _ToBytes == 2)
4017 {
4018 return __intrin_bitcast<_To>(__m256i(__zero_extend(
4019 _mm_shuffle_epi8(__vector_bitcast<_LLong>(__k),
4020 _mm_setr_epi8(6, 7, 14, 15, -1, -1, -1, -1,
4021 -1, -1, -1, -1, -1, -1, -1,
4022 -1)))));
4023 }
4024 else if constexpr (_FromBytes == 4 && _ToBytes == 1)
4025 {
4026 return __intrin_bitcast<_To>(__m256i(__zero_extend(
4027 _mm_shuffle_epi8(__vector_bitcast<_LLong>(__k),
4028 _mm_setr_epi8(3, 7, 11, 15, -1, -1, -1, -1,
4029 -1, -1, -1, -1, -1, -1, -1,
4030 -1)))));
4031 }
4032 else if constexpr (_FromBytes == 8 && _ToBytes == 1)
4033 {
4034 return __intrin_bitcast<_To>(__m256i(__zero_extend(
4035 _mm_shuffle_epi8(__vector_bitcast<_LLong>(__k),
4036 _mm_setr_epi8(7, 15, -1, -1, -1, -1, -1,
4037 -1, -1, -1, -1, -1, -1, -1,
4038 -1, -1)))));
4039 }
4040 else
4041 static_assert(!is_same_v<_Tp, _Tp>, "should be unreachable");
4042 } // }}}
4043 else if constexpr (sizeof(_UI) == 16 && sizeof(__k) == 32)
4044 { // AVX -> SSE {{{
4045 if constexpr (_FromBytes == _ToBytes)
4046 { // keep low 1/2
4047 return __intrin_bitcast<_To>(__lo128(__k));
4048 }
4049 else if constexpr (_FromBytes == _ToBytes * 2)
4050 { // keep all
4051 auto __y = __vector_bitcast<_LLong>(__k);
4052 return __intrin_bitcast<_To>(
4053 _mm_packs_epi16(__lo128(__y), __hi128(__y)));
4054 }
4055 else if constexpr (_FromBytes == _ToBytes * 4)
4056 { // add 1/2 undef
4057 auto __y = __vector_bitcast<_LLong>(__k);
4058 return __intrin_bitcast<_To>(
4059 _mm_packs_epi16(_mm_packs_epi16(__lo128(__y), __hi128(__y)),
4060 __m128i()));
4061 }
4062 else if constexpr (_FromBytes == 8 && _ToBytes == 1)
4063 { // add 3/4 undef
4064 auto __y = __vector_bitcast<_LLong>(__k);
4065 return __intrin_bitcast<_To>(_mm_shuffle_epi8(
4066 _mm_packs_epi16(__lo128(__y), __hi128(__y)),
4067 _mm_setr_epi8(3, 7, 11, 15, -1, -1, -1, -1, -1, -1, -1, -1,
4068 -1, -1, -1, -1)));
4069 }
4070 else if constexpr (_FromBytes * 2 == _ToBytes)
4071 { // keep low 1/4
4072 auto __y = __lo128(__vector_bitcast<_LLong>(__k));
4073 return __intrin_bitcast<_To>(_mm_unpacklo_epi8(__y, __y));
4074 }
4075 else if constexpr (_FromBytes * 4 == _ToBytes)
4076 { // keep low 1/8
4077 auto __y = __lo128(__vector_bitcast<_LLong>(__k));
4078 __y = _mm_unpacklo_epi8(__y, __y);
4079 return __intrin_bitcast<_To>(_mm_unpacklo_epi8(__y, __y));
4080 }
4081 else if constexpr (_FromBytes * 8 == _ToBytes)
4082 { // keep low 1/16
4083 auto __y = __lo128(__vector_bitcast<_LLong>(__k));
4084 __y = _mm_unpacklo_epi8(__y, __y);
4085 __y = _mm_unpacklo_epi8(__y, __y);
4086 return __intrin_bitcast<_To>(_mm_unpacklo_epi8(__y, __y));
4087 }
4088 else
4089 static_assert(!is_same_v<_Tp, _Tp>, "should be unreachable");
4090 } // }}}
4091 else
4092 return _Base::template _S_to_maskvector<_Up, _ToN>(__x);
4093 /*
4094 if constexpr (_FromBytes > _ToBytes) {
4095 const _To __y = __vector_bitcast<_Up>(__k);
4096 return [&] <size_t... _Is> (index_sequence<_Is...>) {
4097 constexpr int _Stride = _FromBytes / _ToBytes;
4098 return _To{__y[(_Is + 1) * _Stride - 1]...};
4099 }(make_index_sequence<std::min(_ToN, _FromN)>());
4100 } else {
4101 // {0, 0, 1, 1} (_Dups = 2, _Is<4>)
4102 // {0, 0, 0, 0, 1, 1, 1, 1} (_Dups = 4, _Is<8>)
4103 // {0, 0, 1, 1, 2, 2, 3, 3} (_Dups = 2, _Is<8>)
4104 // ...
4105 return [&] <size_t... _Is> (index_sequence<_Is...>) {
4106 constexpr int __dup = _ToBytes / _FromBytes;
4107 return __intrin_bitcast<_To>(_From{__k[_Is / __dup]...});
4108 }(make_index_sequence<_FromN>());
4109 }
4110 */
4111 } // }}}
4112 }
4113
4114 // }}}
4115 // _S_to_bits {{{
4116 template <typename _Tp, size_t _Np>
4117 _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
4118 _S_to_bits(_SimdWrapper<_Tp, _Np> __x)
4119 {
4120 if constexpr (is_same_v<_Tp, bool>)
4121 return _BitMask<_Np>(__x._M_data)._M_sanitized();
4122 else
4123 {
4124 static_assert(is_same_v<_Tp, __int_for_sizeof_t<_Tp>>);
4125 if (__builtin_is_constant_evaluated()
4126 || __builtin_constant_p(__x._M_data))
4127 {
4128 const auto __bools = -__x._M_data;
4129 const _ULLong __k = __call_with_n_evaluations<_Np>(
4130 [](auto... __bits) { return (__bits | ...); },
4131 [&](auto __i) { return _ULLong(__bools[+__i]) << __i; });
4132 if (__builtin_is_constant_evaluated()
4133 || __builtin_constant_p(__k))
4134 return __k;
4135 }
4136 const auto __xi = __to_intrin(__x);
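	  // Dispatch on element size and vector width: prefer the AVX-512
	  // move-to-mask / compare-into-mask instructions where available,
	  // otherwise fall back to movemask (sign-bit extraction), packing
	  // 16-bit elements down to 8 bits first.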
4137 if constexpr (sizeof(_Tp) == 1)
4138 if constexpr (sizeof(__xi) == 16)
4139 if constexpr (__have_avx512bw_vl)
4140 return _BitMask<_Np>(_mm_movepi8_mask(__xi));
4141 else // implies SSE2
4142 return _BitMask<_Np>(_mm_movemask_epi8(__xi));
4143 else if constexpr (sizeof(__xi) == 32)
4144 if constexpr (__have_avx512bw_vl)
4145 return _BitMask<_Np>(_mm256_movepi8_mask(__xi));
4146 else // implies AVX2
4147 return _BitMask<_Np>(_mm256_movemask_epi8(__xi));
4148 else // implies AVX512BW
4149 return _BitMask<_Np>(_mm512_movepi8_mask(__xi));
4150
4151 else if constexpr (sizeof(_Tp) == 2)
4152 if constexpr (sizeof(__xi) == 16)
4153 if constexpr (__have_avx512bw_vl)
4154 return _BitMask<_Np>(_mm_movepi16_mask(__xi));
4155 else if constexpr (__have_avx512bw)
4156 return _BitMask<_Np>(_mm512_movepi16_mask(__zero_extend(__xi)));
4157 else // implies SSE2
4158 return _BitMask<_Np>(
4159 _mm_movemask_epi8(_mm_packs_epi16(__xi, __m128i())));
4160 else if constexpr (sizeof(__xi) == 32)
4161 if constexpr (__have_avx512bw_vl)
4162 return _BitMask<_Np>(_mm256_movepi16_mask(__xi));
4163 else if constexpr (__have_avx512bw)
4164 return _BitMask<_Np>(_mm512_movepi16_mask(__zero_extend(__xi)));
4165 else // implies SSE2
4166 return _BitMask<_Np>(_mm_movemask_epi8(
4167 _mm_packs_epi16(__lo128(__xi), __hi128(__xi))));
4168 else // implies AVX512BW
4169 return _BitMask<_Np>(_mm512_movepi16_mask(__xi));
4170
4171 else if constexpr (sizeof(_Tp) == 4)
4172 if constexpr (sizeof(__xi) == 16)
4173 if constexpr (__have_avx512dq_vl)
4174 return _BitMask<_Np>(_mm_movepi32_mask(__xi));
4175 else if constexpr (__have_avx512vl)
4176 return _BitMask<_Np>(_mm_cmplt_epi32_mask(__xi, __m128i()));
4177 else if constexpr (__have_avx512dq)
4178 return _BitMask<_Np>(_mm512_movepi32_mask(__zero_extend(__xi)));
4179 else if constexpr (__have_avx512f)
4180 return _BitMask<_Np>(
4181 _mm512_cmplt_epi32_mask(__zero_extend(__xi), __m512i()));
4182 else // implies SSE
4183 return _BitMask<_Np>(
4184 _mm_movemask_ps(reinterpret_cast<__m128>(__xi)));
4185 else if constexpr (sizeof(__xi) == 32)
4186 if constexpr (__have_avx512dq_vl)
4187 return _BitMask<_Np>(_mm256_movepi32_mask(__xi));
4188 else if constexpr (__have_avx512dq)
4189 return _BitMask<_Np>(_mm512_movepi32_mask(__zero_extend(__xi)));
4190 else if constexpr (__have_avx512vl)
4191 return _BitMask<_Np>(_mm256_cmplt_epi32_mask(__xi, __m256i()));
4192 else if constexpr (__have_avx512f)
4193 return _BitMask<_Np>(
4194 _mm512_cmplt_epi32_mask(__zero_extend(__xi), __m512i()));
4195 else // implies AVX
4196 return _BitMask<_Np>(
4197 _mm256_movemask_ps(reinterpret_cast<__m256>(__xi)));
4198	      else // 64-byte vector, hence at least AVX512F
4199 if constexpr (__have_avx512dq)
4200 return _BitMask<_Np>(_mm512_movepi32_mask(__xi));
4201 else // implies AVX512F
4202 return _BitMask<_Np>(_mm512_cmplt_epi32_mask(__xi, __m512i()));
4203
4204 else if constexpr (sizeof(_Tp) == 8)
4205 if constexpr (sizeof(__xi) == 16)
4206 if constexpr (__have_avx512dq_vl)
4207 return _BitMask<_Np>(_mm_movepi64_mask(__xi));
4208 else if constexpr (__have_avx512dq)
4209 return _BitMask<_Np>(_mm512_movepi64_mask(__zero_extend(__xi)));
4210 else if constexpr (__have_avx512vl)
4211 return _BitMask<_Np>(_mm_cmplt_epi64_mask(__xi, __m128i()));
4212 else if constexpr (__have_avx512f)
4213 return _BitMask<_Np>(
4214 _mm512_cmplt_epi64_mask(__zero_extend(__xi), __m512i()));
4215 else // implies SSE2
4216 return _BitMask<_Np>(
4217 _mm_movemask_pd(reinterpret_cast<__m128d>(__xi)));
4218 else if constexpr (sizeof(__xi) == 32)
4219 if constexpr (__have_avx512dq_vl)
4220 return _BitMask<_Np>(_mm256_movepi64_mask(__xi));
4221 else if constexpr (__have_avx512dq)
4222 return _BitMask<_Np>(_mm512_movepi64_mask(__zero_extend(__xi)));
4223 else if constexpr (__have_avx512vl)
4224 return _BitMask<_Np>(_mm256_cmplt_epi64_mask(__xi, __m256i()));
4225 else if constexpr (__have_avx512f)
4226 return _BitMask<_Np>(
4227 _mm512_cmplt_epi64_mask(__zero_extend(__xi), __m512i()));
4228 else // implies AVX
4229 return _BitMask<_Np>(
4230 _mm256_movemask_pd(reinterpret_cast<__m256d>(__xi)));
4231	      else // 64-byte vector, hence at least AVX512F
4232 if constexpr (__have_avx512dq)
4233 return _BitMask<_Np>(_mm512_movepi64_mask(__xi));
4234 else // implies AVX512F
4235 return _BitMask<_Np>(_mm512_cmplt_epi64_mask(__xi, __m512i()));
4236
4237 else
4238 __assert_unreachable<_Tp>();
4239 }
4240 }
4241 // }}}
4242};
4243
4244// }}}
4245// _MaskImplX86 {{{
4246template <typename _Abi>
4247 struct _MaskImplX86 : _MaskImplX86Mixin, _MaskImplBuiltin<_Abi>
4248 {
4249 using _MaskImplX86Mixin::_S_to_bits;
4250 using _MaskImplX86Mixin::_S_to_maskvector;
4251 using _MaskImplBuiltin<_Abi>::_S_convert;
4252
4253 // member types {{{
4254 template <typename _Tp>
4255 using _SimdMember = typename _Abi::template __traits<_Tp>::_SimdMember;
4256
4257 template <typename _Tp>
4258 using _MaskMember = typename _Abi::template _MaskMember<_Tp>;
4259
4260 template <typename _Tp>
4261 static constexpr size_t _S_size = simd_size_v<_Tp, _Abi>;
4262
4263 using _Base = _MaskImplBuiltin<_Abi>;
4264
4265 // }}}
4266 // _S_broadcast {{{
4267 template <typename _Tp>
4268 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
4269 _S_broadcast(bool __x)
4270 {
4271 if constexpr (__is_avx512_abi<_Abi>())
4272 return __x ? _Abi::_S_masked(_MaskMember<_Tp>(-1))
4273 : _MaskMember<_Tp>();
4274 else
4275 return _Base::template _S_broadcast<_Tp>(__x);
4276 }
4277
4278 // }}}
4279 // _S_load {{{
4280 template <typename _Tp>
4281 _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
4282 _S_load(const bool* __mem)
4283 {
4284 static_assert(is_same_v<_Tp, __int_for_sizeof_t<_Tp>>);
4285 if constexpr (__have_avx512bw)
4286 {
4287 const auto __to_vec_or_bits = [](auto __bits) -> decltype(auto) {
4288 if constexpr (__is_avx512_abi<_Abi>())
4289 return __bits;
4290 else
4291 return _S_to_maskvector<_Tp>(
4292 _BitMask<_S_size<_Tp>>(__bits)._M_sanitized());
4293 };
4294
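	  // The bool elements are 0 or 1 in memory; testing __a & __a != 0 per
	  // byte (vptestmb) yields one mask bit per nonzero bool.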
4295 if constexpr (_S_size<_Tp> <= 16 && __have_avx512vl)
4296 {
4297 __m128i __a = {};
4298 __builtin_memcpy(&__a, __mem, _S_size<_Tp>);
4299 return __to_vec_or_bits(_mm_test_epi8_mask(__a, __a));
4300 }
4301 else if constexpr (_S_size<_Tp> <= 32 && __have_avx512vl)
4302 {
4303 __m256i __a = {};
4304 __builtin_memcpy(&__a, __mem, _S_size<_Tp>);
4305 return __to_vec_or_bits(_mm256_test_epi8_mask(__a, __a));
4306 }
4307 else if constexpr (_S_size<_Tp> <= 64)
4308 {
4309 __m512i __a = {};
4310 __builtin_memcpy(&__a, __mem, _S_size<_Tp>);
4311 return __to_vec_or_bits(_mm512_test_epi8_mask(__a, __a));
4312 }
4313 }
4314 else if constexpr (__is_avx512_abi<_Abi>())
4315 {
4316 if constexpr (_S_size<_Tp> <= 8)
4317 {
4318 __m128i __a = {};
4319 __builtin_memcpy(&__a, __mem, _S_size<_Tp>);
4320 const auto __b = _mm512_cvtepi8_epi64(__a);
4321 return _mm512_test_epi64_mask(__b, __b);
4322 }
4323 else if constexpr (_S_size<_Tp> <= 16)
4324 {
4325 __m128i __a = {};
4326 __builtin_memcpy(&__a, __mem, _S_size<_Tp>);
4327 const auto __b = _mm512_cvtepi8_epi32(__a);
4328 return _mm512_test_epi32_mask(__b, __b);
4329 }
4330 else if constexpr (_S_size<_Tp> <= 32)
4331 {
4332 __m128i __a = {};
4333 __builtin_memcpy(&__a, __mem, 16);
4334 const auto __b = _mm512_cvtepi8_epi32(__a);
4335 __builtin_memcpy(&__a, __mem + 16, _S_size<_Tp> - 16);
4336 const auto __c = _mm512_cvtepi8_epi32(__a);
4337 return _mm512_test_epi32_mask(__b, __b)
4338 | (_mm512_test_epi32_mask(__c, __c) << 16);
4339 }
4340 else if constexpr (_S_size<_Tp> <= 64)
4341 {
4342 __m128i __a = {};
4343 __builtin_memcpy(&__a, __mem, 16);
4344 const auto __b = _mm512_cvtepi8_epi32(__a);
4345 __builtin_memcpy(&__a, __mem + 16, 16);
4346 const auto __c = _mm512_cvtepi8_epi32(__a);
4347 if constexpr (_S_size<_Tp> <= 48)
4348 {
4349 __builtin_memcpy(&__a, __mem + 32, _S_size<_Tp> - 32);
4350 const auto __d = _mm512_cvtepi8_epi32(__a);
4351 return _mm512_test_epi32_mask(__b, __b)
4352 | (_mm512_test_epi32_mask(__c, __c) << 16)
4353 | (_ULLong(_mm512_test_epi32_mask(__d, __d)) << 32);
4354 }
4355 else
4356 {
4357		      __builtin_memcpy(&__a, __mem + 32, 16); // bools 32..47
4358		      const auto __d = _mm512_cvtepi8_epi32(__a);
4359		      __builtin_memcpy(&__a, __mem + 48, _S_size<_Tp> - 48); // bools 48..
4360 const auto __e = _mm512_cvtepi8_epi32(__a);
4361 return _mm512_test_epi32_mask(__b, __b)
4362 | (_mm512_test_epi32_mask(__c, __c) << 16)
4363 | (_ULLong(_mm512_test_epi32_mask(__d, __d)) << 32)
4364 | (_ULLong(_mm512_test_epi32_mask(__e, __e)) << 48);
4365 }
4366 }
4367 else
4368 __assert_unreachable<_Tp>();
4369 }
4370 else if constexpr (sizeof(_Tp) == 8 && _S_size<_Tp> == 2)
4371 return __vector_bitcast<_Tp>(
4372 __vector_type16_t<int>{-int(__mem[0]), -int(__mem[0]),
4373 -int(__mem[1]), -int(__mem[1])});
4374 else if constexpr (sizeof(_Tp) == 8 && _S_size<_Tp> <= 4 && __have_avx)
4375 {
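	  // Up to 4 bool bytes packed into one int: broadcast it and AND with
	  // 0x1, 0x100, 0x10000, 0x1000000 to isolate one bool per 32-bit lane;
	  // != 0 then yields the 32-bit mask, widened to 64-bit lanes below.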
4376 int __bool4 = 0;
4377 __builtin_memcpy(&__bool4, __mem, _S_size<_Tp>);
4378 const auto __k = __to_intrin(
4379 (__vector_broadcast<4>(__bool4)
4380 & __make_vector<int>(0x1, 0x100, 0x10000,
4381 _S_size<_Tp> == 4 ? 0x1000000 : 0))
4382 != 0);
4383 return __vector_bitcast<_Tp>(
4384 __concat(_mm_unpacklo_epi32(__k, __k),
4385 _mm_unpackhi_epi32(__k, __k)));
4386 }
4387 else if constexpr (sizeof(_Tp) == 4 && _S_size<_Tp> <= 4)
4388 {
4389 int __bools = 0;
4390 __builtin_memcpy(&__bools, __mem, _S_size<_Tp>);
4391 if constexpr (__have_sse2)
4392 {
4393 __m128i __k = _mm_cvtsi32_si128(__bools);
4394 __k = _mm_cmpgt_epi16(_mm_unpacklo_epi8(__k, __k), __m128i());
4395 return __vector_bitcast<_Tp, _S_size<_Tp>>(
4396 _mm_unpacklo_epi16(__k, __k));
4397 }
4398 else
4399 {
4400 __m128 __k = _mm_cvtpi8_ps(_mm_cvtsi32_si64(__bools));
4401 _mm_empty();
4402 return __vector_bitcast<_Tp, _S_size<_Tp>>(
4403 _mm_cmpgt_ps(__k, __m128()));
4404 }
4405 }
4406 else if constexpr (sizeof(_Tp) == 4 && _S_size<_Tp> <= 8)
4407 {
4408 __m128i __k = {};
4409 __builtin_memcpy(&__k, __mem, _S_size<_Tp>);
4410 __k = _mm_cmpgt_epi16(_mm_unpacklo_epi8(__k, __k), __m128i());
4411 return __vector_bitcast<_Tp>(
4412 __concat(_mm_unpacklo_epi16(__k, __k),
4413 _mm_unpackhi_epi16(__k, __k)));
4414 }
4415 else if constexpr (sizeof(_Tp) == 2 && _S_size<_Tp> <= 16)
4416 {
4417 __m128i __k = {};
4418 __builtin_memcpy(&__k, __mem, _S_size<_Tp>);
4419 __k = _mm_cmpgt_epi8(__k, __m128i());
4420 if constexpr (_S_size<_Tp> <= 8)
4421 return __vector_bitcast<_Tp, _S_size<_Tp>>(
4422 _mm_unpacklo_epi8(__k, __k));
4423 else
4424 return __concat(_mm_unpacklo_epi8(__k, __k),
4425 _mm_unpackhi_epi8(__k, __k));
4426 }
4427 else
4428 return _Base::template _S_load<_Tp>(__mem);
4429 }
4430
4431 // }}}
4432 // _S_from_bitmask{{{
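// Convert a sanitized bit mask into this ABI's native mask representation:
// AVX-512 ABIs keep the bits as they are, all other ABIs expand them into a
// vector mask.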
4433 template <size_t _Np, typename _Tp>
4434 _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
4435 _S_from_bitmask(_SanitizedBitMask<_Np> __bits, _TypeTag<_Tp>)
4436 {
4437 static_assert(is_same_v<_Tp, __int_for_sizeof_t<_Tp>>);
4438 if constexpr (__is_avx512_abi<_Abi>())
4439 return __bits._M_to_bits();
4440 else
4441 return _S_to_maskvector<_Tp, _S_size<_Tp>>(__bits);
4442 }
4443
4444 // }}}
4445 // _S_masked_load {{{2
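// Load bools from __mem only where __mask is set and merge them into
// __merge. For AVX-512 bit masks with AVX512BW/VL the bools are read with a
// masked byte load and turned into mask bits by testing for non-zero;
// without BW/VL the active bits are visited one by one.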
4446 template <typename _Tp, size_t _Np>
4447 static inline _SimdWrapper<_Tp, _Np>
4448 _S_masked_load(_SimdWrapper<_Tp, _Np> __merge,
4449 _SimdWrapper<_Tp, _Np> __mask, const bool* __mem) noexcept
4450 {
4451 if constexpr (__is_avx512_abi<_Abi>())
4452 {
4453 if constexpr (__have_avx512bw_vl)
4454 {
4455 if constexpr (_Np <= 16)
4456 {
4457 const auto __a
4458 = _mm_mask_loadu_epi8(__m128i(), __mask, __mem);
4459 return (__merge & ~__mask) | _mm_test_epi8_mask(__a, __a);
4460 }
4461 else if constexpr (_Np <= 32)
4462 {
4463 const auto __a
4464 = _mm256_mask_loadu_epi8(__m256i(), __mask, __mem);
4465 return (__merge & ~__mask)
4466 | _mm256_test_epi8_mask(__a, __a);
4467 }
4468 else if constexpr (_Np <= 64)
4469 {
4470 const auto __a
4471 = _mm512_mask_loadu_epi8(__m512i(), __mask, __mem);
4472 return (__merge & ~__mask)
4473 | _mm512_test_epi8_mask(__a, __a);
4474 }
4475 else
4476 __assert_unreachable<_Tp>();
4477 }
4478 else
4479 {
4480 _BitOps::_S_bit_iteration(__mask, [&](auto __i) {
4481 __merge._M_set(__i, __mem[__i]);
4482 });
4483 return __merge;
4484 }
4485 }
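// For SSE/AVX vector masks, AVX512BW/VL still helps: load the selected bool
// bytes with a masked load, widen them to the element width, and subtract
// them from zero under the same mask, so every loaded 1 becomes an all-ones
// lane while unselected lanes keep their value from __merge.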
4486 else if constexpr (__have_avx512bw_vl && _Np == 32 && sizeof(_Tp) == 1)
4487 {
4488 const auto __k = _S_to_bits(__mask)._M_to_bits();
4489 __merge = _mm256_mask_sub_epi8(__to_intrin(__merge), __k, __m256i(),
4490 _mm256_mask_loadu_epi8(__m256i(),
4491 __k, __mem));
4492 }
4493 else if constexpr (__have_avx512bw_vl && _Np == 16 && sizeof(_Tp) == 1)
4494 {
4495 const auto __k = _S_to_bits(__mask)._M_to_bits();
4496 __merge
4497 = _mm_mask_sub_epi8(__vector_bitcast<_LLong>(__merge), __k,
4498 __m128i(),
4499 _mm_mask_loadu_epi8(__m128i(), __k, __mem));
4500 }
4501 else if constexpr (__have_avx512bw_vl && _Np == 16 && sizeof(_Tp) == 2)
4502 {
4503 const auto __k = _S_to_bits(__mask)._M_to_bits();
4504 __merge = _mm256_mask_sub_epi16(
4505 __vector_bitcast<_LLong>(__merge), __k, __m256i(),
4506 _mm256_cvtepi8_epi16(_mm_mask_loadu_epi8(__m128i(), __k, __mem)));
4507 }
4508 else if constexpr (__have_avx512bw_vl && _Np == 8 && sizeof(_Tp) == 2)
4509 {
4510 const auto __k = _S_to_bits(__mask)._M_to_bits();
4511 __merge = _mm_mask_sub_epi16(
4512 __vector_bitcast<_LLong>(__merge), __k, __m128i(),
4513 _mm_cvtepi8_epi16(_mm_mask_loadu_epi8(__m128i(), __k, __mem)));
4514 }
4515 else if constexpr (__have_avx512bw_vl && _Np == 8 && sizeof(_Tp) == 4)
4516 {
4517 const auto __k = _S_to_bits(__mask)._M_to_bits();
4518 __merge = __vector_bitcast<_Tp>(_mm256_mask_sub_epi32(
4519 __vector_bitcast<_LLong>(__merge), __k, __m256i(),
4520 _mm256_cvtepi8_epi32(
4521 _mm_mask_loadu_epi8(__m128i(), __k, __mem))));
4522 }
4523 else if constexpr (__have_avx512bw_vl && _Np == 4 && sizeof(_Tp) == 4)
4524 {
4525 const auto __k = _S_to_bits(__mask)._M_to_bits();
4526 __merge = __vector_bitcast<_Tp>(_mm_mask_sub_epi32(
4527 __vector_bitcast<_LLong>(__merge), __k, __m128i(),
4528 _mm_cvtepi8_epi32(_mm_mask_loadu_epi8(__m128i(), __k, __mem))));
4529 }
4530 else if constexpr (__have_avx512bw_vl && _Np == 4 && sizeof(_Tp) == 8)
4531 {
4532 const auto __k = _S_to_bits(__mask)._M_to_bits();
4533 __merge = __vector_bitcast<_Tp>(_mm256_mask_sub_epi64(
4534 __vector_bitcast<_LLong>(__merge), __k, __m256i(),
4535 _mm256_cvtepi8_epi64(
4536 _mm_mask_loadu_epi8(__m128i(), __k, __mem))));
4537 }
4538 else if constexpr (__have_avx512bw_vl && _Np == 2 && sizeof(_Tp) == 8)
4539 {
4540 const auto __k = _S_to_bits(__mask)._M_to_bits();
4541 __merge = __vector_bitcast<_Tp>(_mm_mask_sub_epi64(
4542 __vector_bitcast<_LLong>(__merge), __k, __m128i(),
4543 _mm_cvtepi8_epi64(_mm_mask_loadu_epi8(__m128i(), __k, __mem))));
4544 }
4545 else
4546 return _Base::_S_masked_load(__merge, __mask, __mem);
4547 return __merge;
4548 }
4549
4550 // _S_store {{{2
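// Store one 0/1 byte per mask element. AVX-512 bit masks are expanded with
// maskz_set1_epi8/epi32; SSE/AVX vector masks are compressed to bool bytes
// with pack/movemask sequences before storing the first _Np bytes.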
4551 template <typename _Tp, size_t _Np>
4552 _GLIBCXX_SIMD_INTRINSIC static void _S_store(_SimdWrapper<_Tp, _Np> __v,
4553 bool* __mem) noexcept
4554 {
4555 if constexpr (__is_avx512_abi<_Abi>())
4556 {
4557 if constexpr (__have_avx512bw_vl)
4558 _CommonImplX86::_S_store<_Np>(
4559 __vector_bitcast<char>([](auto __data) {
4560 if constexpr (_Np <= 16)
4561 return _mm_maskz_set1_epi8(__data, 1);
4562 else if constexpr (_Np <= 32)
4563 return _mm256_maskz_set1_epi8(__data, 1);
4564 else
4565 return _mm512_maskz_set1_epi8(__data, 1);
4566 }(__v._M_data)),
4567 __mem);
4568 else if constexpr (_Np <= 8)
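// Without AVX512BW/VL, PDEP deposits each of the _Np mask bits into the
// lowest bit of one byte, e.g. 0b1011 becomes the bytes 01 01 00 01, which
// is exactly the bool representation to store.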
4569 _CommonImplX86::_S_store<_Np>(
4570 __vector_bitcast<char>(
4571#if defined __x86_64__
4572 __make_wrapper<_ULLong>(
4573 _pdep_u64(__v._M_data, 0x0101010101010101ULL), 0ull)
4574#else
4575 __make_wrapper<_UInt>(_pdep_u32(__v._M_data, 0x01010101U),
4576 _pdep_u32(__v._M_data >> 4,
4577 0x01010101U))
4578#endif
4579 ),
4580 __mem);
4581 else if constexpr (_Np <= 16)
4582 _mm512_mask_cvtepi32_storeu_epi8(
4583 __mem, 0xffffu >> (16 - _Np),
4584 _mm512_maskz_set1_epi32(__v._M_data, 1));
4585 else
4586 __assert_unreachable<_Tp>();
4587 }
4588 else if constexpr (__is_sse_abi<_Abi>()) //{{{
4589 {
4590 if constexpr (_Np == 2 && sizeof(_Tp) == 8)
4591 {
4592 const auto __k = __vector_bitcast<int>(__v);
4593 __mem[0] = -__k[1];
4594 __mem[1] = -__k[3];
4595 }
4596 else if constexpr (_Np <= 4 && sizeof(_Tp) == 4)
4597 {
4598 if constexpr (__have_sse2)
4599 {
4600 const unsigned __bool4
4601 = __vector_bitcast<_UInt>(_mm_packs_epi16(
4602 _mm_packs_epi32(__intrin_bitcast<__m128i>(
4603 __to_intrin(__v)),
4604 __m128i()),
4605 __m128i()))[0]
4606 & 0x01010101u;
4607 __builtin_memcpy(__mem, &__bool4, _Np);
4608 }
4609 else if constexpr (__have_mmx)
4610 {
4611 const __m64 __k = _mm_cvtps_pi8(
4612 __and(__to_intrin(__v), _mm_set1_ps(1.f)));
4613 __builtin_memcpy(__mem, &__k, _Np);
4614 _mm_empty();
4615 }
4616 else
4617 return _Base::_S_store(__v, __mem);
4618 }
4619 else if constexpr (_Np <= 8 && sizeof(_Tp) == 2)
4620 {
4621 _CommonImplX86::_S_store<_Np>(
4622 __vector_bitcast<char>(_mm_packs_epi16(
4623 __to_intrin(__vector_bitcast<_UShort>(__v) >> 15),
4624 __m128i())),
4625 __mem);
4626 }
4627 else if constexpr (_Np <= 16 && sizeof(_Tp) == 1)
4628 _CommonImplX86::_S_store<_Np>(__v._M_data & 1, __mem);
4629 else
4630 __assert_unreachable<_Tp>();
4631 } // }}}
4632 else if constexpr (__is_avx_abi<_Abi>()) // {{{
4633 {
4634 if constexpr (_Np <= 4 && sizeof(_Tp) == 8)
4635 {
4636 auto __k = __intrin_bitcast<__m256i>(__to_intrin(__v));
4637 int __bool4;
4638 if constexpr (__have_avx2)
4639 __bool4 = _mm256_movemask_epi8(__k);
4640 else
4641 __bool4 = (_mm_movemask_epi8(__lo128(__k))
4642 | (_mm_movemask_epi8(__hi128(__k)) << 16));
4643 __bool4 &= 0x01010101;
4644 __builtin_memcpy(__mem, &__bool4, _Np);
4645 }
4646 else if constexpr (_Np <= 8 && sizeof(_Tp) == 4)
4647 {
4648 const auto __k = __intrin_bitcast<__m256i>(__to_intrin(__v));
4649 const auto __k2
4650 = _mm_srli_epi16(_mm_packs_epi16(__lo128(__k), __hi128(__k)),
4651 15);
4652 const auto __k3
4653 = __vector_bitcast<char>(_mm_packs_epi16(__k2, __m128i()));
4654 _CommonImplX86::_S_store<_Np>(__k3, __mem);
4655 }
4656 else if constexpr (_Np <= 16 && sizeof(_Tp) == 2)
4657 {
4658 if constexpr (__have_avx2)
4659 {
4660 const auto __x = _mm256_srli_epi16(__to_intrin(__v), 15);
4661 const auto __bools = __vector_bitcast<char>(
4662 _mm_packs_epi16(__lo128(__x), __hi128(__x)));
4663 _CommonImplX86::_S_store<_Np>(__bools, __mem);
4664 }
4665 else
4666 {
4667 const auto __bools
4668 = 1
4669 & __vector_bitcast<_UChar>(
4670 _mm_packs_epi16(__lo128(__to_intrin(__v)),
4671 __hi128(__to_intrin(__v))));
4672 _CommonImplX86::_S_store<_Np>(__bools, __mem);
4673 }
4674 }
4675 else if constexpr (_Np <= 32 && sizeof(_Tp) == 1)
4676 _CommonImplX86::_S_store<_Np>(1 & __v._M_data, __mem);
4677 else
4678 __assert_unreachable<_Tp>();
4679 } // }}}
4680 else
4681 __assert_unreachable<_Tp>();
4682 }
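// A usage sketch (not part of this header; illustration only, assuming an
// x86 target where simd_mask<float> maps onto this mask implementation):
//   namespace stdx = std::experimental;
//   const stdx::simd<float> __v([](int __i) { return float(__i); });
//   const stdx::simd_mask<float> __m = __v > 1.f;
//   bool __out[stdx::simd_mask<float>::size()];
//   __m.copy_to(__out, stdx::element_aligned); // may end up in _S_store
//   // __out now holds one 0/1 byte per mask element.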
4683
4684 // _S_masked_store {{{2
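// Store bools only where __k is set. On AVX-512 the bit mask is expanded to
// 0/1 bytes and written with a masked byte store; all other ABIs defer to
// the generic implementation.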
4685 template <typename _Tp, size_t _Np>
4686 static inline void
4687 _S_masked_store(const _SimdWrapper<_Tp, _Np> __v, bool* __mem,
4688 const _SimdWrapper<_Tp, _Np> __k) noexcept
4689 {
4690 if constexpr (__is_avx512_abi<_Abi>())
4691 {
4692 static_assert(is_same_v<_Tp, bool>);
4693 if constexpr (_Np <= 16 && __have_avx512bw_vl)
4694 _mm_mask_storeu_epi8(__mem, __k, _mm_maskz_set1_epi8(__v, 1));
4695 else if constexpr (_Np <= 16)
4696 _mm512_mask_cvtepi32_storeu_epi8(__mem, __k,
4697 _mm512_maskz_set1_epi32(__v, 1));
4698 else if constexpr (_Np <= 32 && __have_avx512bw_vl)
4699 _mm256_mask_storeu_epi8(__mem, __k,
4700 _mm256_maskz_set1_epi8(__v, 1));
4701 else if constexpr (_Np <= 32 && __have_avx512bw)
4702 _mm256_mask_storeu_epi8(__mem, __k,
4703 __lo256(_mm512_maskz_set1_epi8(__v, 1)));
4704 else if constexpr (_Np <= 64 && __have_avx512bw)
4705 _mm512_mask_storeu_epi8(__mem, __k,
4706 _mm512_maskz_set1_epi8(__v, 1));
4707 else
4708 __assert_unreachable<_Tp>();
4709 }
4710 else
4711 _Base::_S_masked_store(__v, __mem, __k);
4712 }
4713
4714 // logical and bitwise operators {{{2
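// For AVX-512 bit masks these map directly onto the k-register intrinsics:
// the 8-bit forms need AVX512DQ, the 32- and 64-bit forms need AVX512BW.
// Vector masks defer to the generic implementation.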
4715 template <typename _Tp, size_t _Np>
4716 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
4717 _S_logical_and(const _SimdWrapper<_Tp, _Np>& __x,
4718 const _SimdWrapper<_Tp, _Np>& __y)
4719 {
4720 if constexpr (is_same_v<_Tp, bool>)
4721 {
4722 if constexpr (__have_avx512dq && _Np <= 8)
4723 return _kand_mask8(__x._M_data, __y._M_data);
4724 else if constexpr (_Np <= 16)
4725 return _kand_mask16(__x._M_data, __y._M_data);
4726 else if constexpr (__have_avx512bw && _Np <= 32)
4727 return _kand_mask32(__x._M_data, __y._M_data);
4728 else if constexpr (__have_avx512bw && _Np <= 64)
4729 return _kand_mask64(__x._M_data, __y._M_data);
4730 else
4731 __assert_unreachable<_Tp>();
4732 }
4733 else
4734 return _Base::_S_logical_and(__x, __y);
4735 }
4736
4737 template <typename _Tp, size_t _Np>
4738 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
4739 _S_logical_or(const _SimdWrapper<_Tp, _Np>& __x,
4740 const _SimdWrapper<_Tp, _Np>& __y)
4741 {
4742 if constexpr (is_same_v<_Tp, bool>)
4743 {
4744 if constexpr (__have_avx512dq && _Np <= 8)
4745 return _kor_mask8(__x._M_data, __y._M_data);
4746 else if constexpr (_Np <= 16)
4747 return _kor_mask16(__x._M_data, __y._M_data);
4748 else if constexpr (__have_avx512bw && _Np <= 32)
4749 return _kor_mask32(__x._M_data, __y._M_data);
4750 else if constexpr (__have_avx512bw && _Np <= 64)
4751 return _kor_mask64(__x._M_data, __y._M_data);
4752 else
4753 __assert_unreachable<_Tp>();
4754 }
4755 else
4756 return _Base::_S_logical_or(__x, __y);
4757 }
4758
4759 template <typename _Tp, size_t _Np>
4760 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
4761 _S_bit_not(const _SimdWrapper<_Tp, _Np>& __x)
4762 {
4763 if constexpr (is_same_v<_Tp, bool>)
4764 {
4765 if constexpr (__have_avx512dq && _Np <= 8)
4766 return _kandn_mask8(__x._M_data,
4767 _Abi::template __implicit_mask_n<_Np>());
4768 else if constexpr (_Np <= 16)
4769 return _kandn_mask16(__x._M_data,
4770 _Abi::template __implicit_mask_n<_Np>());
4771 else if constexpr (__have_avx512bw && _Np <= 32)
4772 return _kandn_mask32(__x._M_data,
4773 _Abi::template __implicit_mask_n<_Np>());
4774 else if constexpr (__have_avx512bw && _Np <= 64)
4775 return _kandn_mask64(__x._M_data,
4776 _Abi::template __implicit_mask_n<_Np>());
4777 else
4778 __assert_unreachable<_Tp>();
4779 }
4780 else
4781 return _Base::_S_bit_not(__x);
4782 }
4783
4784 template <typename _Tp, size_t _Np>
4785 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
4786 _S_bit_and(const _SimdWrapper<_Tp, _Np>& __x,
4787 const _SimdWrapper<_Tp, _Np>& __y)
4788 {
4789 if constexpr (is_same_v<_Tp, bool>)
4790 {
4791 if constexpr (__have_avx512dq && _Np <= 8)
4792 return _kand_mask8(__x._M_data, __y._M_data);
4793 else if constexpr (_Np <= 16)
4794 return _kand_mask16(__x._M_data, __y._M_data);
4795 else if constexpr (__have_avx512bw && _Np <= 32)
4796 return _kand_mask32(__x._M_data, __y._M_data);
4797 else if constexpr (__have_avx512bw && _Np <= 64)
4798 return _kand_mask64(__x._M_data, __y._M_data);
4799 else
4800 __assert_unreachable<_Tp>();
4801 }
4802 else
4803 return _Base::_S_bit_and(__x, __y);
4804 }
4805
4806 template <typename _Tp, size_t _Np>
4807 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
4808 _S_bit_or(const _SimdWrapper<_Tp, _Np>& __x,
4809 const _SimdWrapper<_Tp, _Np>& __y)
4810 {
4811 if constexpr (is_same_v<_Tp, bool>)
4812 {
4813 if constexpr (__have_avx512dq && _Np <= 8)
4814 return _kor_mask8(__x._M_data, __y._M_data);
4815 else if constexpr (_Np <= 16)
4816 return _kor_mask16(__x._M_data, __y._M_data);
4817 else if constexpr (__have_avx512bw && _Np <= 32)
4818 return _kor_mask32(__x._M_data, __y._M_data);
4819 else if constexpr (__have_avx512bw && _Np <= 64)
4820 return _kor_mask64(__x._M_data, __y._M_data);
4821 else
4822 __assert_unreachable<_Tp>();
4823 }
4824 else
4825 return _Base::_S_bit_or(__x, __y);
4826 }
4827
4828 template <typename _Tp, size_t _Np>
4829 _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
4830 _S_bit_xor(const _SimdWrapper<_Tp, _Np>& __x,
4831 const _SimdWrapper<_Tp, _Np>& __y)
4832 {
4833 if constexpr (is_same_v<_Tp, bool>)
4834 {
4835 if constexpr (__have_avx512dq && _Np <= 8)
4836 return _kxor_mask8(__x._M_data, __y._M_data);
4837 else if constexpr (_Np <= 16)
4838 return _kxor_mask16(__x._M_data, __y._M_data);
4839 else if constexpr (__have_avx512bw && _Np <= 32)
4840 return _kxor_mask32(__x._M_data, __y._M_data);
4841 else if constexpr (__have_avx512bw && _Np <= 64)
4842 return _kxor_mask64(__x._M_data, __y._M_data);
4843 else
4844 __assert_unreachable<_Tp>();
4845 }
4846 else
4847 return _Base::_S_bit_xor(__x, __y);
4848 }
4849
4850 //}}}2
4851 // _S_masked_assign{{{
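// Bit-blend for bit masks: keep __lhs where __k is 0, take __rhs where __k
// is 1.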
4852 template <size_t _Np>
4853 _GLIBCXX_SIMD_INTRINSIC static void
4854 _S_masked_assign(_SimdWrapper<bool, _Np> __k,
4855 _SimdWrapper<bool, _Np>& __lhs,
4856 _SimdWrapper<bool, _Np> __rhs)
4857 {
4858 __lhs._M_data
4859 = (~__k._M_data & __lhs._M_data) | (__k._M_data & __rhs._M_data);
4860 }
4861
4862 template <size_t _Np>
4863 _GLIBCXX_SIMD_INTRINSIC static void
4864 _S_masked_assign(_SimdWrapper<bool, _Np> __k,
4865 _SimdWrapper<bool, _Np>& __lhs, bool __rhs)
4866 {
4867 if (__rhs)
4868 __lhs._M_data = __k._M_data | __lhs._M_data;
4869 else
4870 __lhs._M_data = ~__k._M_data & __lhs._M_data;
4871 }
4872
4873 using _MaskImplBuiltin<_Abi>::_S_masked_assign;
4874
4875 //}}}
4876 // _S_all_of {{{
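// all_of: with SSE4.1, __testc sets the carry flag iff every bit of the
// implicit mask is also set in __k; otherwise compare the movemask against
// the all-active pattern. For AVX-512, _kortestc* returns 1 iff the OR of
// its operands is all-ones, so OR-ing with ~_Mask forces the inactive lanes
// to 1 and only the active lanes have to be set.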
4877 template <typename _Tp>
4878 _GLIBCXX_SIMD_INTRINSIC static bool _S_all_of(simd_mask<_Tp, _Abi> __k)
4879 {
4880 if constexpr (__is_sse_abi<_Abi>() || __is_avx_abi<_Abi>())
4881 {
4882 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
4883 using _TI = __intrinsic_type_t<_Tp, _Np>;
4884 const _TI __a = reinterpret_cast<_TI>(__to_intrin(__data(__k)));
4885 if constexpr (__have_sse4_1)
4886 {
4887 _GLIBCXX_SIMD_USE_CONSTEXPR _TI __b
4888 = _Abi::template _S_implicit_mask_intrin<_Tp>();
4889 return 0 != __testc(__a, __b);
4890 }
4891 else if constexpr (is_same_v<_Tp, float>)
4892 return (_mm_movemask_ps(__a) & ((1 << _Np) - 1))
4893 == (1 << _Np) - 1;
4894 else if constexpr (is_same_v<_Tp, double>)
4895 return (_mm_movemask_pd(__a) & ((1 << _Np) - 1))
4896 == (1 << _Np) - 1;
4897 else
4898 return (_mm_movemask_epi8(__a) & ((1 << (_Np * sizeof(_Tp))) - 1))
4899 == (1 << (_Np * sizeof(_Tp))) - 1;
4900 }
4901 else if constexpr (__is_avx512_abi<_Abi>())
4902 {
4903 constexpr auto _Mask = _Abi::template _S_implicit_mask<_Tp>();
4904 const auto __kk = __k._M_data._M_data;
4905 if constexpr (sizeof(__kk) == 1)
4906 {
4907 if constexpr (__have_avx512dq)
4908 return _kortestc_mask8_u8(__kk, _Mask == 0xff
4909 ? __kk
4910 : __mmask8(~_Mask));
4911 else
4912 return _kortestc_mask16_u8(__kk, __mmask16(~_Mask));
4913 }
4914 else if constexpr (sizeof(__kk) == 2)
4915 return _kortestc_mask16_u8(__kk, _Mask == 0xffff
4916 ? __kk
4917 : __mmask16(~_Mask));
4918 else if constexpr (sizeof(__kk) == 4 && __have_avx512bw)
4919 return _kortestc_mask32_u8(__kk, _Mask == 0xffffffffU
4920 ? __kk
4921 : __mmask32(~_Mask));
4922 else if constexpr (sizeof(__kk) == 8 && __have_avx512bw)
4923 return _kortestc_mask64_u8(__kk, _Mask == 0xffffffffffffffffULL
4924 ? __kk
4925 : __mmask64(~_Mask));
4926 else
4927 __assert_unreachable<_Tp>();
4928 }
4929 }
4930
4931 // }}}
4932 // _S_any_of {{{
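// any_of: __testz(__a, __b) is 1 iff (__a & __b) == 0, so at least one
// active lane is set iff the result is 0; without SSE4.1 the movemask over
// the active lanes must be non-zero.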
4933 template <typename _Tp>
4934 _GLIBCXX_SIMD_INTRINSIC static bool _S_any_of(simd_mask<_Tp, _Abi> __k)
4935 {
4936 if constexpr (__is_sse_abi<_Abi>() || __is_avx_abi<_Abi>())
4937 {
4938 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
4939 using _TI = __intrinsic_type_t<_Tp, _Np>;
4940 const _TI __a = reinterpret_cast<_TI>(__to_intrin(__data(__k)));
4941 if constexpr (__have_sse4_1)
4942 {
4943 if constexpr (_Abi::template _S_is_partial<_Tp>
4944 || sizeof(__k) < 16)
4945 {
4946 _GLIBCXX_SIMD_USE_CONSTEXPR _TI __b
4947 = _Abi::template _S_implicit_mask_intrin<_Tp>();
4948 return 0 == __testz(__a, __b);
4949 }
4950 else
4951 return 0 == __testz(__a, __a);
4952 }
4953 else if constexpr (is_same_v<_Tp, float>)
4954 return (_mm_movemask_ps(__a) & ((1 << _Np) - 1)) != 0;
4955 else if constexpr (is_same_v<_Tp, double>)
4956 return (_mm_movemask_pd(__a) & ((1 << _Np) - 1)) != 0;
4957 else
4958 return (_mm_movemask_epi8(__a) & ((1 << (_Np * sizeof(_Tp))) - 1))
4959 != 0;
4960 }
4961 else if constexpr (__is_avx512_abi<_Abi>())
4962 return (__k._M_data._M_data & _Abi::template _S_implicit_mask<_Tp>())
4963 != 0;
4964 }
4965
4966 // }}}
4967 // _S_none_of {{{
4968 template <typename _Tp>
4969 _GLIBCXX_SIMD_INTRINSIC static bool _S_none_of(simd_mask<_Tp, _Abi> __k)
4970 {
4971 if constexpr (__is_sse_abi<_Abi>() || __is_avx_abi<_Abi>())
4972 {
4973 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
4974 using _TI = __intrinsic_type_t<_Tp, _Np>;
4975 const _TI __a = reinterpret_cast<_TI>(__to_intrin(__data(__k)));
4976 if constexpr (__have_sse4_1)
4977 {
4978 if constexpr (_Abi::template _S_is_partial<_Tp>
4979 || sizeof(__k) < 16)
4980 {
4981 _GLIBCXX_SIMD_USE_CONSTEXPR _TI __b
4982 = _Abi::template _S_implicit_mask_intrin<_Tp>();
4983 return 0 != __testz(__a, __b);
4984 }
4985 else
4986 return 0 != __testz(__a, __a);
4987 }
4988 else if constexpr (is_same_v<_Tp, float>)
4989 return (__movemask(__a) & ((1 << _Np) - 1)) == 0;
4990 else if constexpr (is_same_v<_Tp, double>)
4991 return (__movemask(__a) & ((1 << _Np) - 1)) == 0;
4992 else
4993 return (__movemask(__a) & int((1ull << (_Np * sizeof(_Tp))) - 1))
4994 == 0;
4995 }
4996 else if constexpr (__is_avx512_abi<_Abi>())
4997 return (__k._M_data._M_data & _Abi::template _S_implicit_mask<_Tp>())
4998 == 0;
4999 }
5000
5001 // }}}
5002 // _S_some_of {{{
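// some_of: __testnzc(__a, __b) is non-zero iff both __a & __b and ~__a & __b
// are non-zero, i.e. some but not all of the active lanes are set.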
5003 template <typename _Tp>
5004 _GLIBCXX_SIMD_INTRINSIC static bool _S_some_of(simd_mask<_Tp, _Abi> __k)
5005 {
5006 if constexpr (__is_sse_abi<_Abi>() || __is_avx_abi<_Abi>())
5007 {
5008 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
5009 using _TI = __intrinsic_type_t<_Tp, _Np>;
5010 const _TI __a = reinterpret_cast<_TI>(__to_intrin(__data(__k)));
5011 if constexpr (__have_sse4_1)
5012 {
5013 _GLIBCXX_SIMD_USE_CONSTEXPR _TI __b
5014 = _Abi::template _S_implicit_mask_intrin<_Tp>();
5015 return 0 != __testnzc(__a, __b);
5016 }
5017 else if constexpr (is_same_v<_Tp, float>)
5018 {
5019 constexpr int __allbits = (1 << _Np) - 1;
5020 const auto __tmp = _mm_movemask_ps(__a) & __allbits;
5021 return __tmp > 0 && __tmp < __allbits;
5022 }
5023 else if constexpr (is_same_v<_Tp, double>)
5024 {
5025 constexpr int __allbits = (1 << _Np) - 1;
5026 const auto __tmp = _mm_movemask_pd(__a) & __allbits;
5027 return __tmp > 0 && __tmp < __allbits;
5028 }
5029 else
5030 {
5031 constexpr int __allbits = (1 << (_Np * sizeof(_Tp))) - 1;
5032 const auto __tmp = _mm_movemask_epi8(__a) & __allbits;
5033 return __tmp > 0 && __tmp < __allbits;
5034 }
5035 }
5036 else if constexpr (__is_avx512_abi<_Abi>())
5037 return _S_any_of(__k) && !_S_all_of(__k);
5038 else
5039 __assert_unreachable<_Tp>();
5040 }
5041
5042 // }}}
5043 // _S_popcount {{{
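// popcount: AVX-512 bit masks are a plain bit popcount. Vector masks either
// popcount the movemask (divided by sizeof(_Tp) for the byte-granular
// _mm_movemask_epi8) or sum the -1 lanes horizontally.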
5044 template <typename _Tp>
5045 _GLIBCXX_SIMD_INTRINSIC static int _S_popcount(simd_mask<_Tp, _Abi> __k)
5046 {
5047 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
5048 const auto __kk = _Abi::_S_masked(__k._M_data)._M_data;
5049 if constexpr (__is_avx512_abi<_Abi>())
5050 {
5051 if constexpr (_Np > 32)
5052 return __builtin_popcountll(__kk);
5053 else
5054 return __builtin_popcount(__kk);
5055 }
5056 else
5057 {
5058 if constexpr (__have_popcnt)
5059 {
5060 int __bits
5061 = __movemask(__to_intrin(__vector_bitcast<_Tp>(__kk)));
5062 const int __count = __builtin_popcount(__bits);
5063 return is_integral_v<_Tp> ? __count / sizeof(_Tp) : __count;
5064 }
5065 else if constexpr (_Np == 2 && sizeof(_Tp) == 8)
5066 {
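// The 2-bit movemask value already satisfies popcount(m) == m - (m >> 1):
// 0b00 -> 0, 0b01 -> 1, 0b10 -> 1, 0b11 -> 2.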
5067 const int __mask = _mm_movemask_pd(__auto_bitcast(__kk));
5068 return __mask - (__mask >> 1);
5069 }
5070 else if constexpr (_Np <= 4 && sizeof(_Tp) == 8)
5071 {
5072 auto __x = -(__lo128(__kk) + __hi128(__kk));
5073 return __x[0] + __x[1];
5074 }
5075 else if constexpr (_Np <= 4 && sizeof(_Tp) == 4)
5076 {
5077 if constexpr (__have_sse2)
5078 {
5079 __m128i __x = __intrin_bitcast<__m128i>(__to_intrin(__kk));
5080 __x = _mm_add_epi32(
5081 __x, _mm_shuffle_epi32(__x, _MM_SHUFFLE(0, 1, 2, 3)));
5082 __x = _mm_add_epi32(
5083 __x, _mm_shufflelo_epi16(__x, _MM_SHUFFLE(1, 0, 3, 2)));
5084 return -_mm_cvtsi128_si32(__x);
5085 }
5086 else
5087 return __builtin_popcount(
5088 _mm_movemask_ps(__auto_bitcast(__kk)));
5089 }
5090 else if constexpr (_Np <= 8 && sizeof(_Tp) == 2)
5091 {
5092 auto __x = __to_intrin(__kk);
5093 __x = _mm_add_epi16(__x,
5094 _mm_shuffle_epi32(__x,
5095 _MM_SHUFFLE(0, 1, 2, 3)));
5096 __x = _mm_add_epi16(
5097 __x, _mm_shufflelo_epi16(__x, _MM_SHUFFLE(0, 1, 2, 3)));
5098 __x = _mm_add_epi16(
5099 __x, _mm_shufflelo_epi16(__x, _MM_SHUFFLE(2, 3, 0, 1)));
5100 return -short(_mm_extract_epi16(__x, 0));
5101 }
5102 else if constexpr (_Np <= 16 && sizeof(_Tp) == 1)
5103 {
5104 auto __x = __to_intrin(__kk);
5105 __x = _mm_add_epi8(__x,
5106 _mm_shuffle_epi32(__x,
5107 _MM_SHUFFLE(0, 1, 2, 3)));
5108 __x = _mm_add_epi8(__x,
5109 _mm_shufflelo_epi16(__x, _MM_SHUFFLE(0, 1, 2,
5110 3)));
5111 __x = _mm_add_epi8(__x,
5112 _mm_shufflelo_epi16(__x, _MM_SHUFFLE(2, 3, 0,
5113 1)));
5114 auto __y = -__vector_bitcast<_UChar>(__x);
5115 if constexpr (__have_sse4_1)
5116 return __y[0] + __y[1];
5117 else
5118 {
5119 unsigned __z = _mm_extract_epi16(__to_intrin(__y), 0);
5120 return (__z & 0xff) + (__z >> 8);
5121 }
5122 }
5123 else if constexpr (sizeof(__kk) == 32)
5124 {
5125 // The following works only as long as the implementations above
5126 // use a summation
5127 using _I = __int_for_sizeof_t<_Tp>;
5128 const auto __as_int = __vector_bitcast<_I>(__kk);
5129 return _MaskImplX86<simd_abi::__sse>::_S_popcount(
5130 simd_mask<_I, simd_abi::__sse>(__private_init,
5131 __lo128(__as_int)
5132 + __hi128(__as_int)));
5133 }
5134 else
5135 __assert_unreachable<_Tp>();
5136 }
5137 }
5138
5139 // }}}
5140 // _S_find_first_set {{{
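// For bit masks the index of the first set bit is the number of trailing
// zeros.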
5141 template <typename _Tp>
5142 _GLIBCXX_SIMD_INTRINSIC static int
5143 _S_find_first_set(simd_mask<_Tp, _Abi> __k)
5144 {
5145 if constexpr (__is_avx512_abi<_Abi>())
5146 return std::__countr_zero(__k._M_data._M_data);
5147 else
5148 return _Base::_S_find_first_set(__k);
5149 }
5150
5151 // }}}
5152 // _S_find_last_set {{{
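// For bit masks the index of the last set bit is the bit width minus one.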
5153 template <typename _Tp>
5154 _GLIBCXX_SIMD_INTRINSIC static int
5155 _S_find_last_set(simd_mask<_Tp, _Abi> __k)
5156 {
5157 if constexpr (__is_avx512_abi<_Abi>())
5158 return std::__bit_width(__k._M_data._M_data) - 1;
5159 else
5160 return _Base::_S_find_last_set(__k);
5161 }
5162
5163 // }}}
5164 };
5165
5166// }}}
5167
5168_GLIBCXX_SIMD_END_NAMESPACE
5169#endif // __cplusplus >= 201703L
5170#endif // _GLIBCXX_EXPERIMENTAL_SIMD_X86_H_
5171
5172// vim: foldmethod=marker sw=2 noet ts=8 sts=2 tw=80