DPDK 25.03.0
rte_bitops.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Arm Limited
 * Copyright(c) 2010-2019 Intel Corporation
 * Copyright(c) 2023 Microsoft Corporation
 * Copyright(c) 2024 Ericsson AB
 */

#ifndef _RTE_BITOPS_H_
#define _RTE_BITOPS_H_

#include <stdint.h>

#include <rte_compat.h>
#include <rte_debug.h>
#include <rte_stdatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_BIT64(nr) (UINT64_C(1) << (nr))

#define RTE_BIT32(nr) (UINT32_C(1) << (nr))

#define RTE_SHIFT_VAL32(val, nr) (UINT32_C(val) << (nr))

#define RTE_SHIFT_VAL64(val, nr) (UINT64_C(val) << (nr))

#define RTE_GENMASK32(high, low) \
	(((~UINT32_C(0)) << (low)) & (~UINT32_C(0) >> (31u - (high))))

#define RTE_GENMASK64(high, low) \
	(((~UINT64_C(0)) << (low)) & (~UINT64_C(0) >> (63u - (high))))

#define RTE_FIELD_GET32(mask, reg) \
	((typeof(mask))(((reg) & (mask)) >> rte_ctz32(mask)))

#define RTE_FIELD_GET64(mask, reg) \
	((typeof(mask))(((reg) & (mask)) >> rte_ctz64(mask)))

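/*
 * Example (added for illustration; the register layout is hypothetical):
 * RTE_GENMASK32() builds a contiguous mask from bit 'high' down to bit
 * 'low', and RTE_FIELD_GET32() masks out the field and shifts it down
 * to bit 0.
 *
 *	uint32_t reg = 0x12345678;
 *	uint32_t mask = RTE_GENMASK32(15, 8);        // 0x0000ff00
 *	uint32_t field = RTE_FIELD_GET32(mask, reg); // 0x56
 */
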
#define rte_bit_test(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_test32, \
		const uint32_t *: __rte_bit_test32, \
		volatile uint32_t *: __rte_bit_v_test32, \
		const volatile uint32_t *: __rte_bit_v_test32, \
		uint64_t *: __rte_bit_test64, \
		const uint64_t *: __rte_bit_test64, \
		volatile uint64_t *: __rte_bit_v_test64, \
		const volatile uint64_t *: __rte_bit_v_test64) \
		(addr, nr)

#define rte_bit_set(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_set32, \
		volatile uint32_t *: __rte_bit_v_set32, \
		uint64_t *: __rte_bit_set64, \
		volatile uint64_t *: __rte_bit_v_set64) \
		(addr, nr)

#define rte_bit_clear(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_clear32, \
		volatile uint32_t *: __rte_bit_v_clear32, \
		uint64_t *: __rte_bit_clear64, \
		volatile uint64_t *: __rte_bit_v_clear64) \
		(addr, nr)

#define rte_bit_assign(addr, nr, value) \
	_Generic((addr), \
		uint32_t *: __rte_bit_assign32, \
		volatile uint32_t *: __rte_bit_v_assign32, \
		uint64_t *: __rte_bit_assign64, \
		volatile uint64_t *: __rte_bit_v_assign64) \
		(addr, nr, value)

#define rte_bit_flip(addr, nr) \
	_Generic((addr), \
		uint32_t *: __rte_bit_flip32, \
		volatile uint32_t *: __rte_bit_v_flip32, \
		uint64_t *: __rte_bit_flip64, \
		volatile uint64_t *: __rte_bit_v_flip64) \
		(addr, nr)

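/*
 * Example (added for illustration): the macros above dispatch on the
 * pointer type via _Generic, so the same spelling works for uint32_t
 * and uint64_t words, with or without volatile.
 *
 *	uint64_t word = 0;
 *	rte_bit_set(&word, 7);           // word == 0x80
 *	rte_bit_assign(&word, 0, true);  // word == 0x81
 *	rte_bit_flip(&word, 7);          // word == 0x01
 *	if (rte_bit_test(&word, 0))
 *		rte_bit_clear(&word, 0); // word == 0
 */
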
#define rte_bit_atomic_test(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test32, \
		const uint32_t *: __rte_bit_atomic_test32, \
		volatile uint32_t *: __rte_bit_atomic_v_test32, \
		const volatile uint32_t *: __rte_bit_atomic_v_test32, \
		uint64_t *: __rte_bit_atomic_test64, \
		const uint64_t *: __rte_bit_atomic_test64, \
		volatile uint64_t *: __rte_bit_atomic_v_test64, \
		const volatile uint64_t *: __rte_bit_atomic_v_test64) \
		(addr, nr, memory_order)

#define rte_bit_atomic_set(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_set32, \
		volatile uint32_t *: __rte_bit_atomic_v_set32, \
		uint64_t *: __rte_bit_atomic_set64, \
		volatile uint64_t *: __rte_bit_atomic_v_set64) \
		(addr, nr, memory_order)

#define rte_bit_atomic_clear(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_clear32, \
		volatile uint32_t *: __rte_bit_atomic_v_clear32, \
		uint64_t *: __rte_bit_atomic_clear64, \
		volatile uint64_t *: __rte_bit_atomic_v_clear64) \
		(addr, nr, memory_order)

#define rte_bit_atomic_assign(addr, nr, value, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_assign32, \
		volatile uint32_t *: __rte_bit_atomic_v_assign32, \
		uint64_t *: __rte_bit_atomic_assign64, \
		volatile uint64_t *: __rte_bit_atomic_v_assign64) \
		(addr, nr, value, memory_order)

#define rte_bit_atomic_flip(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_flip32, \
		volatile uint32_t *: __rte_bit_atomic_v_flip32, \
		uint64_t *: __rte_bit_atomic_flip64, \
		volatile uint64_t *: __rte_bit_atomic_v_flip64) \
		(addr, nr, memory_order)

#define rte_bit_atomic_test_and_set(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test_and_set32, \
		volatile uint32_t *: __rte_bit_atomic_v_test_and_set32, \
		uint64_t *: __rte_bit_atomic_test_and_set64, \
		volatile uint64_t *: __rte_bit_atomic_v_test_and_set64) \
		(addr, nr, memory_order)

#define rte_bit_atomic_test_and_clear(addr, nr, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test_and_clear32, \
		volatile uint32_t *: __rte_bit_atomic_v_test_and_clear32, \
		uint64_t *: __rte_bit_atomic_test_and_clear64, \
		volatile uint64_t *: __rte_bit_atomic_v_test_and_clear64) \
		(addr, nr, memory_order)

#define rte_bit_atomic_test_and_assign(addr, nr, value, memory_order) \
	_Generic((addr), \
		uint32_t *: __rte_bit_atomic_test_and_assign32, \
		volatile uint32_t *: __rte_bit_atomic_v_test_and_assign32, \
		uint64_t *: __rte_bit_atomic_test_and_assign64, \
		volatile uint64_t *: __rte_bit_atomic_v_test_and_assign64) \
		(addr, nr, value, memory_order)

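/*
 * Example (added for illustration): rte_bit_atomic_test_and_set()
 * returns the bit's previous value, so it can claim a flag across
 * threads. The memory order argument is a C11-style ordering such as
 * rte_memory_order_acquire from <rte_stdatomic.h>.
 *
 *	static uint32_t flags;
 *
 *	if (!rte_bit_atomic_test_and_set(&flags, 3, rte_memory_order_acquire)) {
 *		// this thread set bit 3 first and owns the resource
 *	}
 */
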
#define __RTE_GEN_BIT_TEST(variant, qualifier, size) \
__rte_experimental \
static inline bool \
__rte_bit_ ## variant ## test ## size(const qualifier uint ## size ## _t *addr, unsigned int nr) \
{ \
	RTE_ASSERT(nr < size); \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	return *addr & mask; \
}

#define __RTE_GEN_BIT_SET(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_ ## variant ## set ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
{ \
	RTE_ASSERT(nr < size); \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	*addr |= mask; \
}

#define __RTE_GEN_BIT_CLEAR(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_ ## variant ## clear ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
{ \
	RTE_ASSERT(nr < size); \
	uint ## size ## _t mask = ~((uint ## size ## _t)1 << nr); \
	(*addr) &= mask; \
}

#define __RTE_GEN_BIT_ASSIGN(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_ ## variant ## assign ## size(qualifier uint ## size ## _t *addr, unsigned int nr, \
		bool value) \
{ \
	if (value) \
		__rte_bit_ ## variant ## set ## size(addr, nr); \
	else \
		__rte_bit_ ## variant ## clear ## size(addr, nr); \
}

#define __RTE_GEN_BIT_FLIP(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_ ## variant ## flip ## size(qualifier uint ## size ## _t *addr, unsigned int nr) \
{ \
	bool value; \
	value = __rte_bit_ ## variant ## test ## size(addr, nr); \
	__rte_bit_ ## variant ## assign ## size(addr, nr, !value); \
}

#define __RTE_GEN_BIT_OPS(v, qualifier, size) \
	__RTE_GEN_BIT_TEST(v, qualifier, size) \
	__RTE_GEN_BIT_SET(v, qualifier, size) \
	__RTE_GEN_BIT_CLEAR(v, qualifier, size) \
	__RTE_GEN_BIT_ASSIGN(v, qualifier, size) \
	__RTE_GEN_BIT_FLIP(v, qualifier, size)

#define __RTE_GEN_BIT_OPS_SIZE(size) \
	__RTE_GEN_BIT_OPS(,, size) \
	__RTE_GEN_BIT_OPS(v_, volatile, size)

#ifdef ALLOW_EXPERIMENTAL_API
__RTE_GEN_BIT_OPS_SIZE(32)
__RTE_GEN_BIT_OPS_SIZE(64)
#endif

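/*
 * Note (added for clarity): each __RTE_GEN_BIT_OPS_SIZE(n) expansion
 * above emits the plain and volatile ("v_") function families that the
 * _Generic macros select between, e.g. __rte_bit_test32(),
 * __rte_bit_v_test32(), __rte_bit_set64(), and so on.
 */
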
#define __RTE_GEN_BIT_ATOMIC_TEST(variant, qualifier, size) \
__rte_experimental \
static inline bool \
__rte_bit_atomic_ ## variant ## test ## size(const qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	const qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(const qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	return rte_atomic_load_explicit(a_addr, memory_order) & mask; \
}

#define __RTE_GEN_BIT_ATOMIC_SET(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_atomic_ ## variant ## set ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	rte_atomic_fetch_or_explicit(a_addr, mask, memory_order); \
}

#define __RTE_GEN_BIT_ATOMIC_CLEAR(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_atomic_ ## variant ## clear ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	rte_atomic_fetch_and_explicit(a_addr, ~mask, memory_order); \
}

#define __RTE_GEN_BIT_ATOMIC_FLIP(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_atomic_ ## variant ## flip ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	rte_atomic_fetch_xor_explicit(a_addr, mask, memory_order); \
}

#define __RTE_GEN_BIT_ATOMIC_ASSIGN(variant, qualifier, size) \
__rte_experimental \
static inline void \
__rte_bit_atomic_ ## variant ## assign ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, bool value, int memory_order) \
{ \
	if (value) \
		__rte_bit_atomic_ ## variant ## set ## size(addr, nr, memory_order); \
	else \
		__rte_bit_atomic_ ## variant ## clear ## size(addr, nr, memory_order); \
}

#define __RTE_GEN_BIT_ATOMIC_TEST_AND_SET(variant, qualifier, size) \
__rte_experimental \
static inline bool \
__rte_bit_atomic_ ## variant ## test_and_set ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	uint ## size ## _t prev; \
	prev = rte_atomic_fetch_or_explicit(a_addr, mask, memory_order); \
	return prev & mask; \
}

#define __RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(variant, qualifier, size) \
__rte_experimental \
static inline bool \
__rte_bit_atomic_ ## variant ## test_and_clear ## size(qualifier uint ## size ## _t *addr, \
		unsigned int nr, int memory_order) \
{ \
	RTE_ASSERT(nr < size); \
	qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
		(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
	uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
	uint ## size ## _t prev; \
	prev = rte_atomic_fetch_and_explicit(a_addr, ~mask, memory_order); \
	return prev & mask; \
}

#define __RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(variant, qualifier, size) \
__rte_experimental \
static inline bool \
__rte_bit_atomic_ ## variant ## test_and_assign ## size( \
		qualifier uint ## size ## _t *addr, unsigned int nr, bool value, \
		int memory_order) \
{ \
	if (value) \
		return __rte_bit_atomic_ ## variant ## test_and_set ## size(addr, nr, \
			memory_order); \
	else \
		return __rte_bit_atomic_ ## variant ## test_and_clear ## size(addr, nr, \
			memory_order); \
}

#define __RTE_GEN_BIT_ATOMIC_OPS(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_SET(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_CLEAR(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_ASSIGN(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST_AND_SET(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(variant, qualifier, size) \
	__RTE_GEN_BIT_ATOMIC_FLIP(variant, qualifier, size)

#define __RTE_GEN_BIT_ATOMIC_OPS_SIZE(size) \
	__RTE_GEN_BIT_ATOMIC_OPS(,, size) \
	__RTE_GEN_BIT_ATOMIC_OPS(v_, volatile, size)

#ifdef ALLOW_EXPERIMENTAL_API
__RTE_GEN_BIT_ATOMIC_OPS_SIZE(32)
__RTE_GEN_BIT_ATOMIC_OPS_SIZE(64)
#endif

/*------------------------ 32-bit relaxed operations ------------------------*/

static inline uint32_t
rte_bit_relaxed_get32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = UINT32_C(1) << nr;
	return (*addr) & mask;
}

static inline void
rte_bit_relaxed_set32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	*addr = (*addr) | mask;
}

static inline void
rte_bit_relaxed_clear32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	*addr = (*addr) & (~mask);
}

static inline uint32_t
rte_bit_relaxed_test_and_set32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	uint32_t val = *addr;
	*addr = val | mask;
	return val & mask;
}

static inline uint32_t
rte_bit_relaxed_test_and_clear32(unsigned int nr, volatile uint32_t *addr)
{
	RTE_ASSERT(nr < 32);

	uint32_t mask = RTE_BIT32(nr);
	uint32_t val = *addr;
	*addr = val & (~mask);
	return val & mask;
}

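/*
 * Example (added for illustration): the relaxed test-and-set returns
 * the bit's previous state, so it can drive a once-only path on a
 * single thread; no atomicity or ordering is implied.
 *
 *	static volatile uint32_t init_flags;
 *
 *	if (!rte_bit_relaxed_test_and_set32(0, &init_flags)) {
 *		// first call: perform one-time initialization
 *	}
 */
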
/*------------------------ 64-bit relaxed operations ------------------------*/

static inline uint64_t
rte_bit_relaxed_get64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	return (*addr) & mask;
}

static inline void
rte_bit_relaxed_set64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	(*addr) = (*addr) | mask;
}

static inline void
rte_bit_relaxed_clear64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	*addr = (*addr) & (~mask);
}

static inline uint64_t
rte_bit_relaxed_test_and_set64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	uint64_t val = *addr;
	*addr = val | mask;
	/* return the original state of the bit, as the 32-bit variant does */
	return val & mask;
}

static inline uint64_t
rte_bit_relaxed_test_and_clear64(unsigned int nr, volatile uint64_t *addr)
{
	RTE_ASSERT(nr < 64);

	uint64_t mask = RTE_BIT64(nr);
	uint64_t val = *addr;
	*addr = val & (~mask);
	return val & mask;
}

#ifdef RTE_TOOLCHAIN_MSVC

static inline unsigned int
rte_clz32(uint32_t v)
{
	unsigned long rv;

	(void)_BitScanReverse(&rv, v);

	return (unsigned int)(sizeof(v) * CHAR_BIT - 1 - rv);
}

static inline unsigned int
rte_clz64(uint64_t v)
{
	unsigned long rv;

	(void)_BitScanReverse64(&rv, v);

	return (unsigned int)(sizeof(v) * CHAR_BIT - 1 - rv);
}

static inline unsigned int
rte_ctz32(uint32_t v)
{
	unsigned long rv;

	(void)_BitScanForward(&rv, v);

	return (unsigned int)rv;
}

static inline unsigned int
rte_ctz64(uint64_t v)
{
	unsigned long rv;

	(void)_BitScanForward64(&rv, v);

	return (unsigned int)rv;
}

static inline unsigned int
rte_popcount32(uint32_t v)
{
	return (unsigned int)__popcnt(v);
}

static inline unsigned int
rte_popcount64(uint64_t v)
{
	return (unsigned int)__popcnt64(v);
}

__rte_experimental
static inline unsigned int
rte_ffs32(uint32_t v)
{
	unsigned long rv;

	if (_BitScanForward(&rv, v) == 0)
		return 0;

	return (unsigned int)rv + 1;
}

__rte_experimental
static inline unsigned int
rte_ffs64(uint64_t v)
{
	unsigned long rv;

	if (_BitScanForward64(&rv, v) == 0)
		return 0;

	return (unsigned int)rv + 1;
}

#else

static inline unsigned int
rte_clz32(uint32_t v)
{
	return (unsigned int)__builtin_clz(v);
}

static inline unsigned int
rte_clz64(uint64_t v)
{
	return (unsigned int)__builtin_clzll(v);
}

static inline unsigned int
rte_ctz32(uint32_t v)
{
	return (unsigned int)__builtin_ctz(v);
}

static inline unsigned int
rte_ctz64(uint64_t v)
{
	return (unsigned int)__builtin_ctzll(v);
}

static inline unsigned int
rte_popcount32(uint32_t v)
{
	return (unsigned int)__builtin_popcount(v);
}

static inline unsigned int
rte_popcount64(uint64_t v)
{
	return (unsigned int)__builtin_popcountll(v);
}

__rte_experimental
static inline unsigned int
rte_ffs32(uint32_t v)
{
	return (unsigned int)__builtin_ffs(v);
}

__rte_experimental
static inline unsigned int
rte_ffs64(uint64_t v)
{
	return (unsigned int)__builtin_ffsll(v);
}

#endif

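/*
 * Example (added for illustration) for the count/scan helpers above.
 * Note that rte_clz*() and rte_ctz*() are undefined for v == 0, while
 * rte_ffs*() is one-indexed and returns 0 in that case.
 *
 *	rte_clz32(0x00010000);  // 15 leading zeros
 *	rte_ctz32(0x00010000);  // 16 trailing zeros
 *	rte_popcount64(0xf0f0); // 8 bits set
 *	rte_ffs32(0x8);         // 4 (position of first set bit, one-indexed)
 *	rte_ffs32(0);           // 0 (no bit set)
 */
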
static inline uint32_t
rte_combine32ms1b(uint32_t x)
{
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;

	return x;
}

static inline uint64_t
rte_combine64ms1b(uint64_t v)
{
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	v |= v >> 32;

	return v;
}

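/*
 * Example (added for illustration): rte_combine32ms1b() propagates the
 * most significant set bit into every lower position, which the
 * power-of-2 helpers below build on.
 *
 *	rte_combine32ms1b(0x00050000); // 0x0007ffff
 */
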
static inline uint32_t
rte_bsf32(uint32_t v)
{
	return (uint32_t)rte_ctz32(v);
}

static inline int
rte_bsf32_safe(uint32_t v, uint32_t *pos)
{
	if (v == 0)
		return 0;

	*pos = rte_bsf32(v);
	return 1;
}

static inline uint32_t
rte_bsf64(uint64_t v)
{
	return (uint32_t)rte_ctz64(v);
}

static inline int
rte_bsf64_safe(uint64_t v, uint32_t *pos)
{
	if (v == 0)
		return 0;

	*pos = rte_bsf64(v);
	return 1;
}

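/*
 * Example (added for illustration): the _safe variants guard against
 * the undefined v == 0 case and report success via the return value.
 *
 *	uint32_t pos;
 *
 *	if (rte_bsf32_safe(0x00000060, &pos)) {
 *		// pos == 5, the index of the lowest set bit
 *	}
 */
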
static inline uint32_t
rte_fls_u32(uint32_t x)
{
	return (x == 0) ? 0 : 32 - rte_clz32(x);
}

static inline uint32_t
rte_fls_u64(uint64_t x)
{
	return (x == 0) ? 0 : 64 - rte_clz64(x);
}

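/*
 * Example (added for illustration): "find last set" is one-indexed
 * from the least significant bit, so it is 0 for 0 and 32 for a value
 * with bit 31 set.
 *
 *	rte_fls_u32(0);          // 0
 *	rte_fls_u32(0x1);        // 1
 *	rte_fls_u32(0x80000000); // 32
 */
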
/*********** Macros to work with powers of 2 ********/

#define RTE_IS_POWER_OF_2(n) ((n) && !(((n) - 1) & (n)))

static inline int
rte_is_power_of_2(uint32_t n)
{
	return n && !(n & (n - 1));
}

static inline uint32_t
rte_align32pow2(uint32_t x)
{
	x--;
	x = rte_combine32ms1b(x);

	return x + 1;
}

static inline uint32_t
rte_align32prevpow2(uint32_t x)
{
	x = rte_combine32ms1b(x);

	return x - (x >> 1);
}

static inline uint64_t
rte_align64pow2(uint64_t v)
{
	v--;
	v = rte_combine64ms1b(v);

	return v + 1;
}

static inline uint64_t
rte_align64prevpow2(uint64_t v)
{
	v = rte_combine64ms1b(v);

	return v - (v >> 1);
}

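/*
 * Example (added for illustration): aligning up rounds to the next
 * power of two (values that already are powers of two are unchanged),
 * while the "prevpow2" variants round down.
 *
 *	rte_align32pow2(33);     // 64
 *	rte_align32pow2(64);     // 64
 *	rte_align32prevpow2(33); // 32
 */
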
static inline uint32_t
rte_log2_u32(uint32_t v)
{
	if (v == 0)
		return 0;
	v = rte_align32pow2(v);
	return rte_bsf32(v);
}

static inline uint32_t
rte_log2_u64(uint64_t v)
{
	if (v == 0)
		return 0;
	v = rte_align64pow2(v);
	/* we checked for v being 0 already, so no undefined behavior */
	return rte_bsf64(v);
}

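/*
 * Example (added for illustration): rte_log2_u32() returns the
 * exponent of v rounded up to the next power of two, with 0 mapped
 * to 0.
 *
 *	rte_log2_u32(1); // 0
 *	rte_log2_u32(8); // 3
 *	rte_log2_u32(9); // 4 (9 is rounded up to 16)
 */
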
#ifdef __cplusplus
}

/*
 * Since C++ doesn't support generic selection (i.e., _Generic),
 * function overloading is used instead. Such functions must be
 * defined outside 'extern "C"' to be accepted by the compiler.
 */

#undef rte_bit_test
#undef rte_bit_set
#undef rte_bit_clear
#undef rte_bit_assign
#undef rte_bit_flip

#undef rte_bit_atomic_test
#undef rte_bit_atomic_set
#undef rte_bit_atomic_clear
#undef rte_bit_atomic_assign
#undef rte_bit_atomic_flip
#undef rte_bit_atomic_test_and_set
#undef rte_bit_atomic_test_and_clear
#undef rte_bit_atomic_test_and_assign

#define __RTE_BIT_OVERLOAD_V_2(family, v, fun, qualifier, size, arg1_type, arg1_name) \
static inline void \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name) \
{ \
	__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, size, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2(family,, fun, qualifier, size, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name)

#define __RTE_BIT_OVERLOAD_2(family, fun, qualifier, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, 32, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2(family, fun, qualifier, 64, arg1_type, arg1_name)

#define __RTE_BIT_OVERLOAD_V_2R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
static inline ret_type \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name) \
{ \
	return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_V_2R(family, v_, fun, qualifier volatile, size, ret_type, arg1_type, \
		arg1_name)

#define __RTE_BIT_OVERLOAD_2R(family, fun, qualifier, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name) \
	__RTE_BIT_OVERLOAD_SZ_2R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name)

#define __RTE_BIT_OVERLOAD_V_3(family, v, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
static inline void \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
		arg2_type arg2_name) \
{ \
	__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3(family,, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name, \
		arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_3(family, fun, qualifier, arg1_type, arg1_name, arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, 32, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3(family, fun, qualifier, 64, arg1_type, arg1_name, \
		arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_V_3R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
static inline ret_type \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
		arg2_type arg2_name) \
{ \
	return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_V_3R(family, v_, fun, qualifier volatile, size, ret_type, \
		arg1_type, arg1_name, arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_3R(family, fun, qualifier, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name) \
	__RTE_BIT_OVERLOAD_SZ_3R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name)

#define __RTE_BIT_OVERLOAD_V_4(family, v, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
static inline void \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
		arg2_type arg2_name, arg3_type arg3_name) \
{ \
	__rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name, arg3_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4(family,, fun, qualifier, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4(family, v_, fun, qualifier volatile, size, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name)

#define __RTE_BIT_OVERLOAD_4(family, fun, qualifier, arg1_type, arg1_name, arg2_type, arg2_name, \
		arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, 32, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4(family, fun, qualifier, 64, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name)

#define __RTE_BIT_OVERLOAD_V_4R(family, v, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
static inline ret_type \
rte_bit_ ## family ## fun(qualifier uint ## size ## _t *addr, arg1_type arg1_name, \
		arg2_type arg2_name, arg3_type arg3_name) \
{ \
	return __rte_bit_ ## family ## v ## fun ## size(addr, arg1_name, arg2_name, \
		arg3_name); \
}

#define __RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4R(family,, fun, qualifier, size, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_V_4R(family, v_, fun, qualifier volatile, size, ret_type, \
		arg1_type, arg1_name, arg2_type, arg2_name, arg3_type, arg3_name)

#define __RTE_BIT_OVERLOAD_4R(family, fun, qualifier, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, 32, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name) \
	__RTE_BIT_OVERLOAD_SZ_4R(family, fun, qualifier, 64, ret_type, arg1_type, arg1_name, \
		arg2_type, arg2_name, arg3_type, arg3_name)

#ifdef ALLOW_EXPERIMENTAL_API
__RTE_BIT_OVERLOAD_2R(, test, const, bool, unsigned int, nr)
__RTE_BIT_OVERLOAD_2(, set,, unsigned int, nr)
__RTE_BIT_OVERLOAD_2(, clear,, unsigned int, nr)
__RTE_BIT_OVERLOAD_3(, assign,, unsigned int, nr, bool, value)
__RTE_BIT_OVERLOAD_2(, flip,, unsigned int, nr)

__RTE_BIT_OVERLOAD_3R(atomic_, test, const, bool, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3(atomic_, set,, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3(atomic_, clear,, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_4(atomic_, assign,, unsigned int, nr, bool, value, int, memory_order)
__RTE_BIT_OVERLOAD_3(atomic_, flip,, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3R(atomic_, test_and_set,, bool, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_3R(atomic_, test_and_clear,, bool, unsigned int, nr, int, memory_order)
__RTE_BIT_OVERLOAD_4R(atomic_, test_and_assign,, bool, unsigned int, nr, bool, value,
		int, memory_order)
#endif
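
/*
 * Example (added for illustration): in C++ the overloads generated
 * above replace the _Generic macros, so call sites look the same as
 * in C.
 *
 *	uint64_t word = 0;
 *	rte_bit_set(&word, 12);           // resolves to the uint64_t overload
 *	bool b = rte_bit_test(&word, 12); // b == true
 */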

#endif

#endif /* _RTE_BITOPS_H_ */