forked from vectorclass/version2
-
Notifications
You must be signed in to change notification settings - Fork 0
/
vectori256.h
5745 lines (5190 loc) · 218 KB
/
vectori256.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/**************************** vectori256.h *******************************
* Author: Agner Fog
* Date created: 2012-05-30
* Last modified: 2020-02-23
* Version: 2.01.01
* Project: vector class library
* Description:
* Header file defining integer vector classes as interface to intrinsic
* functions in x86 microprocessors with AVX2 and later instruction sets.
*
* Instructions: see vcl_manual.pdf
*
* The following vector classes are defined here:
* Vec256b Vector of 256 bits. Used internally as base class
* Vec32c Vector of 32 8-bit signed integers
* Vec32uc Vector of 32 8-bit unsigned integers
* Vec32cb Vector of 32 Booleans for use with Vec32c and Vec32uc
* Vec16s Vector of 16 16-bit signed integers
* Vec16us Vector of 16 16-bit unsigned integers
* Vec16sb Vector of 16 Booleans for use with Vec16s and Vec16us
* Vec8i Vector of 8 32-bit signed integers
* Vec8ui Vector of 8 32-bit unsigned integers
* Vec8ib Vector of 8 Booleans for use with Vec8i and Vec8ui
* Vec4q Vector of 4 64-bit signed integers
* Vec4uq Vector of 4 64-bit unsigned integers
* Vec4qb Vector of 4 Booleans for use with Vec4q and Vec4uq
*
* Each vector object is represented internally in the CPU as a 256-bit register.
* This header file defines operators and functions for these vectors.
*
* (c) Copyright 2012-2020 Agner Fog.
* Apache License version 2.0 or later.
*****************************************************************************/
#ifndef VECTORI256_H
#define VECTORI256_H 1
#ifndef VECTORCLASS_H
#include "vectorclass.h"
#endif
#if VECTORCLASS_H < 20100
#error Incompatible versions of vector class library mixed
#endif
// check combination of header files
#if defined (VECTORI256E_H)
#error Two different versions of vectori256.h included
#endif
#ifdef VCL_NAMESPACE
namespace VCL_NAMESPACE {
#endif
// Generate a constant vector of 8 integers stored in memory.
// Generate a constant vector of 8 32-bit integers.
// The values are template parameters, so the compiler can emit the vector as
// a memory constant. _mm256_setr_epi32 with constant arguments folds to a load.
template <uint32_t i0, uint32_t i1, uint32_t i2, uint32_t i3, uint32_t i4, uint32_t i5, uint32_t i6, uint32_t i7 >
static inline constexpr __m256i constant8ui() {
    return _mm256_setr_epi32(i0,i1,i2,i3,i4,i5,i6,i7);
}
// Join two 128-bit vectors into one 256-bit vector: lo -> lower half, hi -> upper half.
// NOTE(review): macro arguments 'lo' is not parenthesized in the expansion; pass simple expressions only.
#define set_m128ir(lo,hi) _mm256_inserti128_si256(_mm256_castsi128_si256(lo),(hi),1)
/*****************************************************************************
*
* Compact boolean vectors
*
*****************************************************************************/
#if INSTRSET >= 10 // 32-bit and 64-bit masks require AVX512BW
// Compact vector of 32 booleans
// Compact vector of 32 booleans, stored as a bit mask in an AVX512BW mask register.
class Vec32b {
protected:
    __mmask32 mm; // Boolean mask register
public:
    // Default constructor:
    Vec32b() {
    }
    // Constructor to convert from type __mmask32 used in intrinsics.
    // NOTE(review): intentionally not 'explicit'; integral mask values convert implicitly.
    Vec32b(__mmask32 x) {
        mm = x;
    }
    // Constructor to broadcast single value:
    Vec32b(bool b) {
        mm = __mmask32(-int32_t(b));           // b ? all 32 bits set : 0
    }
    // Constructor to make from two halves (x0 = low 16 bits, x1 = high 16 bits)
    Vec32b(Vec16b const x0, Vec16b const x1) {
        mm = uint16_t(__mmask16(x0)) | uint32_t(__mmask16(x1)) << 16;
    }
    // Assignment operator to convert from type __mmask32 used in intrinsics:
    Vec32b & operator = (__mmask32 x) {
        mm = x;
        return *this;
    }
    // Assignment operator to broadcast scalar value:
    Vec32b & operator = (bool b) {
        mm = Vec32b(b);
        return *this;
    }
    // Type cast operator to convert to __mmask32 used in intrinsics
    operator __mmask32() const {
        return mm;
    }
    // split into two halves
    Vec16b get_low() const {
        return Vec16b(__mmask16(mm));
    }
    Vec16b get_high() const {
        return Vec16b(__mmask16(mm >> 16));
    }
    // Member function to change a single element in vector.
    // index must be in 0..31.
    Vec32b const insert(int index, bool value) {
        // Shift an unsigned 1: the previous (1 << index) was undefined behavior
        // for index == 31 (left shift into the sign bit of a signed int).
        mm = __mmask32(((uint32_t)mm & ~(uint32_t(1) << index)) | (uint32_t)value << index);
        return *this;
    }
    // Member function extract a single element from vector
    bool extract(int index) const {
        return ((uint32_t)mm >> index) & 1;
    }
    // Extract a single element. Operator [] can only read an element, not write.
    bool operator [] (int index) const {
        return extract(index);
    }
    // Member function to change a bitfield to a boolean vector
    Vec32b & load_bits(uint32_t a) {
        mm = __mmask32(a);
        return *this;
    }
    // Number of elements
    static constexpr int size() {
        return 32;
    }
    // Type of elements (2 = compact boolean)
    static constexpr int elementtype() {
        return 2;
    }
};
#endif
/*****************************************************************************
*
* Vector of 256 bits. Used as base class
*
*****************************************************************************/
// Vector of 256 bits, represented as one __m256i register. Base class for all
// 256-bit integer vector classes in this file.
class Vec256b {
protected:
    __m256i ymm; // Integer vector
public:
    // Default constructor:
    Vec256b() {}
    // Constructor to broadcast the same value into all elements
    // Removed because of undesired implicit conversions:
    //Vec256b(int i) {ymm = _mm256_set1_epi32(-(i & 1));}
    // Constructor to build from two Vec128b (a0 = low half, a1 = high half):
    Vec256b(Vec128b const a0, Vec128b const a1) {
        ymm = set_m128ir(a0, a1);
    }
    // Constructor to convert from type __m256i used in intrinsics:
    Vec256b(__m256i const x) {
        ymm = x;
    }
    // Assignment operator to convert from type __m256i used in intrinsics:
    Vec256b & operator = (__m256i const x) {
        ymm = x;
        return *this;
    }
    // Type cast operator to convert to __m256i used in intrinsics
    operator __m256i() const {
        return ymm;
    }
    // Member function to load from array (unaligned)
    Vec256b & load(void const * p) {
        ymm = _mm256_loadu_si256((__m256i const*)p);
        return *this;
    }
    // Member function to load from array, aligned by 32
    // You may use load_a instead of load if you are certain that p points to an address
    // divisible by 32, but there is hardly any speed advantage of load_a on modern processors
    Vec256b & load_a(void const * p) {
        ymm = _mm256_load_si256((__m256i const*)p);
        return *this;
    }
    // Member function to store into array (unaligned)
    void store(void * p) const {
        _mm256_storeu_si256((__m256i*)p, ymm);
    }
    // Member function storing into array, aligned by 32
    // You may use store_a instead of store if you are certain that p points to an address
    // divisible by 32, but there is hardly any speed advantage of load_a on modern processors
    void store_a(void * p) const {
        _mm256_store_si256((__m256i*)p, ymm);
    }
    // Member function storing to aligned uncached memory (non-temporal store).
    // This may be more efficient than store_a when storing large blocks of memory if it
    // is unlikely that the data will stay in the cache until it is read again.
    // Note: Will generate runtime error if p is not aligned by 32
    void store_nt(void * p) const {
        _mm256_stream_si256((__m256i*)p, ymm);
    }
    // Member functions to split into two Vec128b:
    Vec128b get_low() const {
        return _mm256_castsi256_si128(ymm);         // low half: no instruction needed
    }
    Vec128b get_high() const {
        return _mm256_extractf128_si256(ymm,1);     // high half
    }
    // Size in bits (not elements; this is the untyped base class)
    static constexpr int size() {
        return 256;
    }
    // Type of elements (1 = untyped bits)
    static constexpr int elementtype() {
        return 1;
    }
    typedef __m256i registertype;
};
// Define operators and functions for this class
// vector operator & : bitwise and
static inline Vec256b operator & (Vec256b const a, Vec256b const b) {
return _mm256_and_si256(a, b);
}
static inline Vec256b operator && (Vec256b const a, Vec256b const b) {
return a & b;
}
// vector operator | : bitwise or
static inline Vec256b operator | (Vec256b const a, Vec256b const b) {
return _mm256_or_si256(a, b);
}
static inline Vec256b operator || (Vec256b const a, Vec256b const b) {
return a | b;
}
// vector operator ^ : bitwise xor
static inline Vec256b operator ^ (Vec256b const a, Vec256b const b) {
return _mm256_xor_si256(a, b);
}
// vector operator ~ : bitwise not
static inline Vec256b operator ~ (Vec256b const a) {
return _mm256_xor_si256(a, _mm256_set1_epi32(-1));
}
// vector operator &= : bitwise and
static inline Vec256b & operator &= (Vec256b & a, Vec256b const b) {
a = a & b;
return a;
}
// vector operator |= : bitwise or
static inline Vec256b & operator |= (Vec256b & a, Vec256b const b) {
a = a | b;
return a;
}
// vector operator ^= : bitwise xor
static inline Vec256b & operator ^= (Vec256b & a, Vec256b const b) {
a = a ^ b;
return a;
}
// function andnot: a & ~ b
static inline Vec256b andnot (Vec256b const a, Vec256b const b) {
return _mm256_andnot_si256(b, a);
}
/*****************************************************************************
*
* selectb function
*
*****************************************************************************/
// Select between two sources, byte by byte. Used in various functions and operators
// Corresponds to this pseudocode:
// for (int i = 0; i < 32; i++) result[i] = s[i] ? a[i] : b[i];
// Each byte in s must be either 0 (false) or 0xFF (true). No other values are allowed.
// Only bit 7 in each byte of s is checked,
// because vpblendvb selects on the top bit of each selector byte.
static inline __m256i selectb (__m256i const s, __m256i const a, __m256i const b) {
    return _mm256_blendv_epi8 (b, a, s);
}
// horizontal_and. Returns true if all bits are 1
static inline bool horizontal_and (Vec256b const a) {
    __m256i const all_ones = _mm256_set1_epi32(-1);
    // vptest sets CF when (~a & all_ones) == 0, i.e. when every bit of a is set
    return _mm256_testc_si256(a, all_ones) != 0;
}
// horizontal_or. Returns true if at least one bit is 1
static inline bool horizontal_or (Vec256b const a) {
    // vptest sets ZF when a & a == 0; any set bit gives a nonzero result
    return _mm256_testz_si256(a, a) == 0;
}
/*****************************************************************************
*
* Vector of 32 8-bit signed integers
*
*****************************************************************************/
// Vector of 32 8-bit signed integers in one 256-bit register.
class Vec32c : public Vec256b {
public:
    // Default constructor:
    Vec32c(){
    }
    // Constructor to broadcast the same value into all elements:
    Vec32c(int i) {
        ymm = _mm256_set1_epi8((char)i);
    }
    // Constructor to build from all elements:
    Vec32c(int8_t i0, int8_t i1, int8_t i2, int8_t i3, int8_t i4, int8_t i5, int8_t i6, int8_t i7,
        int8_t i8, int8_t i9, int8_t i10, int8_t i11, int8_t i12, int8_t i13, int8_t i14, int8_t i15,
        int8_t i16, int8_t i17, int8_t i18, int8_t i19, int8_t i20, int8_t i21, int8_t i22, int8_t i23,
        int8_t i24, int8_t i25, int8_t i26, int8_t i27, int8_t i28, int8_t i29, int8_t i30, int8_t i31) {
        ymm = _mm256_setr_epi8(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15,
            i16, i17, i18, i19, i20, i21, i22, i23, i24, i25, i26, i27, i28, i29, i30, i31);
    }
    // Constructor to build from two Vec16c (a0 = low half, a1 = high half):
    Vec32c(Vec16c const a0, Vec16c const a1) {
        ymm = set_m128ir(a0, a1);
    }
    // Constructor to convert from type __m256i used in intrinsics:
    Vec32c(__m256i const x) {
        ymm = x;
    }
    // Assignment operator to convert from type __m256i used in intrinsics:
    Vec32c & operator = (__m256i const x) {
        ymm = x;
        return *this;
    }
    // Constructor to convert from type Vec256b used in emulation
    Vec32c(Vec256b const & x) { // gcc requires const &
        ymm = x;
    }
    // Type cast operator to convert to __m256i used in intrinsics
    operator __m256i() const {
        return ymm;
    }
    // Member function to load from array (unaligned)
    Vec32c & load(void const * p) {
        ymm = _mm256_loadu_si256((__m256i const*)p);
        return *this;
    }
    // Member function to load from array, aligned by 32
    Vec32c & load_a(void const * p) {
        ymm = _mm256_load_si256((__m256i const*)p);
        return *this;
    }
    // Partial load. Load n elements and set the rest to 0.
    // NOTE(review): the AVX512 path assumes 0 <= n <= 32; the shift is done in
    // uint64_t so that n == 32 yields the all-ones mask without overflow.
    Vec32c & load_partial(int n, void const * p) {
#if INSTRSET >= 10  // AVX512VL
        ymm = _mm256_maskz_loadu_epi8(__mmask32(((uint64_t)1 << n) - 1), p);
#else
        if (n <= 0) {
            *this = 0;
        }
        else if (n <= 16) {
            *this = Vec32c(Vec16c().load_partial(n, p), 0);
        }
        else if (n < 32) {
            *this = Vec32c(Vec16c().load(p), Vec16c().load_partial(n-16, (char const*)p+16));
        }
        else {
            load(p);
        }
#endif
        return *this;
    }
    // Partial store. Store n elements
    void store_partial(int n, void * p) const {
#if INSTRSET >= 10  // AVX512VL + AVX512BW
        _mm256_mask_storeu_epi8(p, __mmask32(((uint64_t)1 << n) - 1), ymm);
#else
        if (n <= 0) {
            return;
        }
        else if (n <= 16) {
            get_low().store_partial(n, p);
        }
        else if (n < 32) {
            get_low().store(p);
            get_high().store_partial(n-16, (char*)p+16);
        }
        else {
            store(p);
        }
#endif
    }
    // cut off vector to n elements. The last 32-n elements are set to zero
    Vec32c & cutoff(int n) {
#if INSTRSET >= 10
        ymm = _mm256_maskz_mov_epi8(__mmask32(((uint64_t)1 << n) - 1), ymm);
#else
        if (uint32_t(n) >= 32) return *this;
        // Sliding window into a table of 32 x FF followed by 32 x 00:
        // loading at offset 32-n gives a mask with exactly n leading FF bytes.
        const union {
            int32_t i[16];
            char    c[64];
        } mask = {{-1,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0}};
        *this &= Vec32c().load(mask.c+32-n);
#endif
        return *this;
    }
    // Member function to change a single element in vector
    Vec32c const insert(int index, int8_t value) {
#if INSTRSET >= 10
        ymm = _mm256_mask_set1_epi8(ymm, __mmask32(1u << index), value);
#else
        // Sliding window into a table with a single -1 byte: loading at offset
        // 32-index puts the FF byte at the desired element position.
        const int8_t maskl[64] = {0,0,0,0, 0,0,0,0, 0,0,0,0 ,0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0,
            -1,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ,0,0,0,0, 0,0,0,0, 0,0,0,0};
        __m256i broad = _mm256_set1_epi8(value);  // broadcast value into all elements
        __m256i mask  = _mm256_loadu_si256((__m256i const*)(maskl+32-(index & 0x1F))); // mask with FF at index position
        ymm = selectb(mask,broad,ymm);
#endif
        return *this;
    }
    // Member function extract a single element from vector
    int8_t extract(int index) const {
#if INSTRSET >= 10 && defined (__AVX512VBMI2__)
        // compress moves the selected byte to position 0
        __m256i x = _mm256_maskz_compress_epi8(__mmask32(1u << index), ymm);
        return (int8_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(x));
#else
        int8_t x[32];
        store(x);
        return x[index & 0x1F];
#endif
    }
    // Extract a single element. Use store function if extracting more than one element.
    // Operator [] can only read an element, not write.
    int8_t operator [] (int index) const {
        return extract(index);
    }
    // Member functions to split into two Vec16c:
    Vec16c get_low() const {
        return _mm256_castsi256_si128(ymm);
    }
    Vec16c get_high() const {
        return _mm256_extracti128_si256(ymm,1);
    }
    // Number of elements
    static constexpr int size() {
        return 32;
    }
    // Type of elements (4 = 8-bit signed integer)
    static constexpr int elementtype() {
        return 4;
    }
};
/*****************************************************************************
*
* Vec32cb: Vector of 32 Booleans for use with Vec32c and Vec32uc
*
*****************************************************************************/
#if INSTRSET < 10 // broad boolean vectors
// Broad boolean vector for Vec32c/Vec32uc: each element is 0 (false) or -1 (true),
// stored in a full 256-bit register. Used when compact mask registers are unavailable.
class Vec32cb : public Vec32c {
public:
    // Default constructor:
    Vec32cb(){
    }
    // Constructor to build from all elements:
    /*
    Vec32cb(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7,
        bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15,
        bool x16, bool x17, bool x18, bool x19, bool x20, bool x21, bool x22, bool x23,
        bool x24, bool x25, bool x26, bool x27, bool x28, bool x29, bool x30, bool x31) :
        Vec32c(-int8_t(x0), -int8_t(x1), -int8_t(x2), -int8_t(x3), -int8_t(x4), -int8_t(x5), -int8_t(x6), -int8_t(x7),
            -int8_t(x8), -int8_t(x9), -int8_t(x10), -int8_t(x11), -int8_t(x12), -int8_t(x13), -int8_t(x14), -int8_t(x15),
            -int8_t(x16), -int8_t(x17), -int8_t(x18), -int8_t(x19), -int8_t(x20), -int8_t(x21), -int8_t(x22), -int8_t(x23),
            -int8_t(x24), -int8_t(x25), -int8_t(x26), -int8_t(x27), -int8_t(x28), -int8_t(x29), -int8_t(x30), -int8_t(x31))
        {} */
    // Constructor to convert from type __m256i used in intrinsics:
    Vec32cb(__m256i const x) {
        ymm = x;
    }
    // Assignment operator to convert from type __m256i used in intrinsics:
    Vec32cb & operator = (__m256i const x) {
        ymm = x;
        return *this;
    }
    // Constructor to broadcast scalar value (-1 = all bits set per element):
    Vec32cb(bool b) : Vec32c(-int8_t(b)) {
    }
    // Constructor to convert from Vec32c
    Vec32cb(Vec32c const a) {
        ymm = a;
    }
    // Assignment operator to broadcast scalar value:
    Vec32cb & operator = (bool b) {
        *this = Vec32cb(b);
        return *this;
    }
    // Constructor to build from two Vec16cb:
    Vec32cb(Vec16cb const a0, Vec16cb const a1) : Vec32c(Vec16c(a0), Vec16c(a1)) {
    }
    // Member functions to split into two Vec16c:
    Vec16cb get_low() const {
        return Vec16cb(Vec32c::get_low());
    }
    Vec16cb get_high() const {
        return Vec16cb(Vec32c::get_high());
    }
    // Member function to change a single element (stored as 0 or -1)
    Vec32cb & insert (int index, bool a) {
        Vec32c::insert(index, -(int8_t)a);
        return *this;
    }
    // Member function extract a single element from vector
    bool extract(int index) const {
        return Vec32c::extract(index) != 0;
    }
    // Extract a single element. Use store function if extracting more than one element.
    // Operator [] can only read an element, not write.
    bool operator [] (int index) const {
        return extract(index);
    }
    // Member function to change a bitfield to a boolean vector.
    // Broadcasts ~a, routes the byte containing each element's bit into that
    // element, isolates the bit, and compares with zero to get 0/-1 elements.
    Vec32cb & load_bits(uint32_t a) {
        __m256i b1 = _mm256_set1_epi32((int32_t)~a);  // broadcast a. Invert because we have no compare-not-equal
        __m256i m1 = constant8ui<0,0,0x01010101,0x01010101,0x02020202,0x02020202,0x03030303,0x03030303>();
        __m256i c1 = _mm256_shuffle_epi8(b1, m1);  // get right byte in each position
        __m256i m2 = constant8ui<0x08040201,0x80402010,0x08040201,0x80402010,0x08040201,0x80402010,0x08040201,0x80402010>();
        __m256i d1 = _mm256_and_si256(c1, m2);  // isolate one bit in each byte
        ymm = _mm256_cmpeq_epi8(d1,_mm256_setzero_si256());// compare with 0
        return *this;
    }
    // Type of elements (3 = broad boolean)
    static constexpr int elementtype() {
        return 3;
    }
    // Prevent constructing from int, etc.
    Vec32cb(int b) = delete;
    Vec32cb & operator = (int x) = delete;
};
#else
typedef Vec32b Vec32cb; // compact boolean vector
#endif
/*****************************************************************************
*
* Define operators and functions for Vec32b or Vec32cb
*
*****************************************************************************/
// vector operator & : bitwise and
static inline Vec32cb operator & (Vec32cb const a, Vec32cb const b) {
#if INSTRSET >= 10 // compact boolean vectors
    return __mmask32(__mmask32(a) & __mmask32(b)); // _kand_mask32 not defined in all compilers
#else
    return Vec32c(Vec256b(a) & Vec256b(b));
#endif
}
// operator && is the same as & for boolean vectors
static inline Vec32cb operator && (Vec32cb const a, Vec32cb const b) {
    return a & b;
}
// vector operator &= : bitwise and
static inline Vec32cb & operator &= (Vec32cb & a, Vec32cb const b) {
    a = a & b;
    return a;
}
// vector operator | : bitwise or
static inline Vec32cb operator | (Vec32cb const a, Vec32cb const b) {
#if INSTRSET >= 10 // compact boolean vectors
    return __mmask32(__mmask32(a) | __mmask32(b)); // _kor_mask32
#else
    return Vec32c(Vec256b(a) | Vec256b(b));
#endif
}
// operator || is the same as | for boolean vectors
static inline Vec32cb operator || (Vec32cb const a, Vec32cb const b) {
    return a | b;
}
// vector operator |= : bitwise or
static inline Vec32cb & operator |= (Vec32cb & a, Vec32cb const b) {
    a = a | b;
    return a;
}
// vector operator ^ : bitwise xor
static inline Vec32cb operator ^ (Vec32cb const a, Vec32cb const b) {
#if INSTRSET >= 10 // compact boolean vectors
    return __mmask32(__mmask32(a) ^ __mmask32(b)); // _kxor_mask32
#else
    return Vec32c(Vec256b(a) ^ Vec256b(b));
#endif
}
// vector operator ^= : bitwise xor
static inline Vec32cb & operator ^= (Vec32cb & a, Vec32cb const b) {
    a = a ^ b;
    return a;
}
// vector operator == : xnor (element-wise boolean equality)
static inline Vec32cb operator == (Vec32cb const a, Vec32cb const b) {
#if INSTRSET >= 10 // compact boolean vectors
    return __mmask32(__mmask32(a) ^ ~__mmask32(b)); // _kxnor_mask32
#else
    return Vec32c(a ^ (~b));
#endif
}
// vector operator != : xor (element-wise boolean inequality)
static inline Vec32cb operator != (Vec32cb const a, Vec32cb const b) {
    return Vec32cb(a ^ b);
}
// vector operator ~ : bitwise not
static inline Vec32cb operator ~ (Vec32cb const a) {
#if INSTRSET >= 10 // compact boolean vectors
    return __mmask32(~ __mmask32(a)); // _knot_mask32
#else
    return Vec32c( ~ Vec256b(a));
#endif
}
// vector operator ! : element not (same as ~ for boolean vectors)
static inline Vec32cb operator ! (Vec32cb const a) {
    return ~ a;
}
// vector function andnot: a & ~b
static inline Vec32cb andnot (Vec32cb const a, Vec32cb const b) {
#if INSTRSET >= 10 // compact boolean vectors
    return __mmask32(~__mmask32(b) & __mmask32(a)); // _kandn_mask32
#else
    return Vec32c(andnot(Vec256b(a), Vec256b(b)));
#endif
}
#if INSTRSET >= 10 // compact boolean vectors
// horizontal_and. Returns true if all elements are true
static inline bool horizontal_and(Vec32b const a) {
    return __mmask32(a) == 0xFFFFFFFF;
}
// horizontal_or. Returns true if at least one element is true
static inline bool horizontal_or(Vec32b const a) {
    return __mmask32(a) != 0;
}
// fix bug in gcc version 70400 header file: _mm256_cmp_epi8_mask returns 16 bit mask, should be 32 bit
// Template parameter i is the comparison predicate (0=eq, 1=lt, 2=le, 4=neq, 5=ge, 6=gt,
// as used by the operators below). Signed byte compare.
template <int i>
static inline __mmask32 _mm256_cmp_epi8_mask_fix(__m256i a, __m256i b) {
#if defined (GCC_VERSION) && GCC_VERSION < 70900 && ! defined (__INTEL_COMPILER)
    return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi)a, (__v32qi)b, i, (__mmask32)(-1));
#else
    return _mm256_cmp_epi8_mask(a, b, i);
#endif
}
// Same workaround for the unsigned byte compare.
template <int i>
static inline __mmask32 _mm256_cmp_epu8_mask_fix(__m256i a, __m256i b) {
#if defined (GCC_VERSION) && GCC_VERSION < 70900 && ! defined (__INTEL_COMPILER)
    return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi)a, (__v32qi)b, i, (__mmask32)(-1));
#else
    return _mm256_cmp_epu8_mask(a, b, i);
#endif
}
#endif
/*****************************************************************************
*
* Operators for Vec32c
*
*****************************************************************************/
// vector operator + : add element by element
static inline Vec32c operator + (Vec32c const a, Vec32c const b) {
    return _mm256_add_epi8(a, b);
}
// vector operator += : add
static inline Vec32c & operator += (Vec32c & a, Vec32c const b) {
    a = a + b;
    return a;
}
// postfix operator ++
static inline Vec32c operator ++ (Vec32c & a, int) {
    Vec32c a0 = a;
    a = a + 1;
    return a0;
}
// prefix operator ++
static inline Vec32c & operator ++ (Vec32c & a) {
    a = a + 1;
    return a;
}
// vector operator - : subtract element by element
static inline Vec32c operator - (Vec32c const a, Vec32c const b) {
    return _mm256_sub_epi8(a, b);
}
// vector operator - : unary minus
static inline Vec32c operator - (Vec32c const a) {
    return _mm256_sub_epi8(_mm256_setzero_si256(), a);
}
// vector operator -= : subtract
static inline Vec32c & operator -= (Vec32c & a, Vec32c const b) {
    a = a - b;
    return a;
}
// postfix operator --
static inline Vec32c operator -- (Vec32c & a, int) {
    Vec32c a0 = a;
    a = a - 1;
    return a0;
}
// prefix operator --
static inline Vec32c & operator -- (Vec32c & a) {
    a = a - 1;
    return a;
}
// vector operator * : multiply element by element
static inline Vec32c operator * (Vec32c const a, Vec32c const b) {
    // There is no 8-bit multiply in AVX2. Split into two 16-bit multiplications
    __m256i aodd    = _mm256_srli_epi16(a,8);         // odd numbered elements of a
    __m256i bodd    = _mm256_srli_epi16(b,8);         // odd numbered elements of b
    __m256i muleven = _mm256_mullo_epi16(a,b);        // product of even numbered elements
    __m256i mulodd  = _mm256_mullo_epi16(aodd,bodd);  // product of odd numbered elements
    mulodd          = _mm256_slli_epi16(mulodd,8);    // put odd numbered elements back in place
#if INSTRSET >= 10  // AVX512VL + AVX512BW
    return _mm256_mask_mov_epi8(mulodd, 0x55555555, muleven);
#else
    __m256i mask    = _mm256_set1_epi32(0x00FF00FF);  // mask for even positions
    __m256i product = selectb(mask,muleven,mulodd);   // interleave even and odd
    return product;
#endif
}
// vector operator *= : multiply
static inline Vec32c & operator *= (Vec32c & a, Vec32c const b) {
    a = a * b;
    return a;
}
// vector operator << : shift left all elements
// (emulated with 16-bit shifts; low bits that would cross into the
// neighboring byte are masked off first)
static inline Vec32c operator << (Vec32c const a, int b) {
    uint32_t mask = (uint32_t)0xFF >> (uint32_t)b;                // mask to remove bits that are shifted out
    __m256i am    = _mm256_and_si256(a,_mm256_set1_epi8((char)mask));// remove bits that will overflow
    __m256i res   = _mm256_sll_epi16(am,_mm_cvtsi32_si128(b));    // 16-bit shifts
    return res;
}
// vector operator <<= : shift left
static inline Vec32c & operator <<= (Vec32c & a, int b) {
    a = a << b;
    return a;
}
// vector operator >> : shift right arithmetic all elements
// (even bytes are pre-shifted left so their sign bit lands in bit 15)
static inline Vec32c operator >> (Vec32c const a, int b) {
    __m256i aeven = _mm256_slli_epi16(a,8);                  // even numbered elements of a. get sign bit in position
    aeven = _mm256_sra_epi16(aeven,_mm_cvtsi32_si128(b+8));  // shift arithmetic, back to position
    __m256i aodd = _mm256_sra_epi16(a,_mm_cvtsi32_si128(b)); // shift odd numbered elements arithmetic
#if INSTRSET >= 10  // AVX512VL + AVX512BW
    return _mm256_mask_mov_epi8(aodd, 0x55555555, aeven);
#else
    __m256i mask = _mm256_set1_epi32(0x00FF00FF);            // mask for even positions
    __m256i res  = selectb(mask,aeven,aodd);                 // interleave even and odd
    return res;
#endif
}
// vector operator >>= : shift right arithmetic
static inline Vec32c & operator >>= (Vec32c & a, int b) {
    a = a >> b;
    return a;
}
// vector operator == : returns true for elements for which a == b
static inline Vec32cb operator == (Vec32c const a, Vec32c const b) {
#if INSTRSET >= 10  // compact boolean vectors
    //return _mm256_cmp_epi8_mask (a, b, 0);
    return _mm256_cmp_epi8_mask_fix<0> (a, b);
#else
    return _mm256_cmpeq_epi8(a,b);
#endif
}
// vector operator != : returns true for elements for which a != b
static inline Vec32cb operator != (Vec32c const a, Vec32c const b) {
#if INSTRSET >= 10  // compact boolean vectors
    return _mm256_cmp_epi8_mask_fix<4> (a, b);
#else
    return Vec32cb(Vec32c(~(a == b)));  // no byte compare-not-equal in AVX2: invert ==
#endif
}
// vector operator > : returns true for elements for which a > b (signed)
static inline Vec32cb operator > (Vec32c const a, Vec32c const b) {
#if INSTRSET >= 10  // compact boolean vectors
    return _mm256_cmp_epi8_mask_fix<6> (a, b);
#else
    return _mm256_cmpgt_epi8(a,b);
#endif
}
// vector operator < : returns true for elements for which a < b (signed)
static inline Vec32cb operator < (Vec32c const a, Vec32c const b) {
#if INSTRSET >= 10  // compact boolean vectors
    return _mm256_cmp_epi8_mask_fix<1> (a, b);
#else
    return b > a;
#endif
}
// vector operator >= : returns true for elements for which a >= b (signed)
static inline Vec32cb operator >= (Vec32c const a, Vec32c const b) {
#if INSTRSET >= 10  // compact boolean vectors
    return _mm256_cmp_epi8_mask_fix<5> (a, b);
#else
    return Vec32cb(Vec32c(~(b > a)));   // a >= b  <=>  !(b > a)
#endif
}
// vector operator <= : returns true for elements for which a <= b (signed)
static inline Vec32cb operator <= (Vec32c const a, Vec32c const b) {
#if INSTRSET >= 10  // compact boolean vectors
    return _mm256_cmp_epi8_mask_fix<2> (a, b);
#else
    return b >= a;
#endif
}
// vector operator & : bitwise and
static inline Vec32c operator & (Vec32c const a, Vec32c const b) {
    return Vec32c(Vec256b(a) & Vec256b(b));
}
// operator && is the same as & for integer vectors
static inline Vec32c operator && (Vec32c const a, Vec32c const b) {
    return a & b;
}
// vector operator &= : bitwise and
static inline Vec32c & operator &= (Vec32c & a, Vec32c const b) {
    a = a & b;
    return a;
}
// vector operator | : bitwise or
static inline Vec32c operator | (Vec32c const a, Vec32c const b) {
    return Vec32c(Vec256b(a) | Vec256b(b));
}
// operator || is the same as | for integer vectors
static inline Vec32c operator || (Vec32c const a, Vec32c const b) {
    return a | b;
}
// vector operator |= : bitwise or
static inline Vec32c & operator |= (Vec32c & a, Vec32c const b) {
    a = a | b;
    return a;
}
// vector operator ^ : bitwise xor
static inline Vec32c operator ^ (Vec32c const a, Vec32c const b) {
    return Vec32c(Vec256b(a) ^ Vec256b(b));
}
// vector operator ^= : bitwise xor
static inline Vec32c & operator ^= (Vec32c & a, Vec32c const b) {
    a = a ^ b;
    return a;
}
// vector operator ~ : bitwise not
static inline Vec32c operator ~ (Vec32c const a) {
    return Vec32c( ~ Vec256b(a));
}
// vector operator ! : logical not, returns true for elements == 0
static inline Vec32cb operator ! (Vec32c const a) {
#if INSTRSET >= 10  // compact boolean vectors
    return _mm256_cmp_epi8_mask_fix<0> (a, _mm256_setzero_si256());
#else
    return _mm256_cmpeq_epi8(a,_mm256_setzero_si256());
#endif
}
// Functions for this class
// Select between two operands. Corresponds to this pseudocode:
// for (int i = 0; i < 32; i++) result[i] = s[i] ? a[i] : b[i];
static inline Vec32c select (Vec32cb const s, Vec32c const a, Vec32c const b) {
#if INSTRSET >= 10  // compact boolean vectors
    return _mm256_mask_mov_epi8(b, s, a);
#else
    return selectb(s,a,b);
#endif
}
// Conditional add: For all vector elements i: result[i] = f[i] ? (a[i] + b[i]) : a[i]
static inline Vec32c if_add (Vec32cb const f, Vec32c const a, Vec32c const b) {
#if INSTRSET >= 10  // compact boolean vectors
    return _mm256_mask_add_epi8 (a, f, a, b);
#else
    return a + (Vec32c(f) & b);   // f is 0 or -1 per element, so f & b zeroes unselected addends
#endif
}
// Conditional subtract
static inline Vec32c if_sub (Vec32cb const f, Vec32c const a, Vec32c const b) {
#if INSTRSET >= 10  // compact boolean vectors
    return _mm256_mask_sub_epi8 (a, f, a, b);
#else
    return a - (Vec32c(f) & b);
#endif
}
// Conditional multiply: result[i] = f[i] ? (a[i] * b[i]) : a[i]
static inline Vec32c if_mul (Vec32cb const f, Vec32c const a, Vec32c const b) {
    return select(f, a*b, a);
}
// Horizontal add: Calculates the sum of all vector elements. Overflow will wrap around
// vpsadbw sums groups of 8 unsigned bytes; truncating the total to 8 bits gives the
// same wrapped result as a signed sum.
static inline int8_t horizontal_add (Vec32c const a) {
    __m256i sum1 = _mm256_sad_epu8(a,_mm256_setzero_si256());    // 4 partial sums, one per quadword
    __m256i sum2 = _mm256_shuffle_epi32(sum1,2);                 // bring quadword 1 down to 0
    __m256i sum3 = _mm256_add_epi16(sum1,sum2);                  // combine within each 128-bit lane
    __m128i sum4 = _mm256_extracti128_si256(sum3,1);             // high lane
    __m128i sum5 = _mm_add_epi16(_mm256_castsi256_si128(sum3),sum4); // combine the two lanes
    int8_t  sum6 = (int8_t)_mm_cvtsi128_si32(sum5);              // truncate to 8 bits
    return  sum6;                                                // sign extend to 32 bits
}
// Horizontal add extended: Calculates the sum of all vector elements.
// Each element is sign-extended before addition to avoid overflow
// (32 int8 values sum to at most +/-4064, which fits in 16 bits).
static inline int32_t horizontal_add_x (Vec32c const a) {
    __m256i aeven = _mm256_slli_epi16(a,8);       // even numbered elements of a. get sign bit in position
            aeven = _mm256_srai_epi16(aeven,8);   // sign extend even numbered elements
    __m256i aodd  = _mm256_srai_epi16(a,8);       // sign extend odd numbered elements
    __m256i sum1  = _mm256_add_epi16(aeven,aodd); // add even and odd elements
    __m128i sum2  = _mm_add_epi16(_mm256_extracti128_si256(sum1,1),_mm256_castsi256_si128(sum1));
    // The hadd instruction is inefficient, and may be split into two instructions for faster decoding
#if false
    __m128i sum3  = _mm_hadd_epi16(sum2,sum2);
    __m128i sum4  = _mm_hadd_epi16(sum3,sum3);
    __m128i sum5  = _mm_hadd_epi16(sum4,sum4);
#else
    // log2 reduction of the 8 remaining 16-bit partial sums
    __m128i sum3  = _mm_add_epi16(sum2,_mm_unpackhi_epi64(sum2,sum2));
    __m128i sum4  = _mm_add_epi16(sum3,_mm_shuffle_epi32(sum3,1));
    __m128i sum5  = _mm_add_epi16(sum4,_mm_shufflelo_epi16(sum4,1));
#endif
    int16_t sum6  = (int16_t)_mm_cvtsi128_si32(sum5);  // 16 bit sum
    return  sum6;                                      // sign extend to 32 bits
}
// function add_saturated: add element by element, signed with saturation
// (results are clamped to [-128, 127] instead of wrapping)
static inline Vec32c add_saturated(Vec32c const a, Vec32c const b) {
    return _mm256_adds_epi8(a, b);
}
// function sub_saturated: subtract element by element, signed with saturation
static inline Vec32c sub_saturated(Vec32c const a, Vec32c const b) {
    return _mm256_subs_epi8(a, b);
}
// function max: a > b ? a : b
static inline Vec32c max(Vec32c const a, Vec32c const b) {
    return _mm256_max_epi8(a,b);
}
// function min: a < b ? a : b
static inline Vec32c min(Vec32c const a, Vec32c const b) {
    return _mm256_min_epi8(a,b);
}
// function abs: a >= 0 ? a : -a