author Yury Norov <yury.norov@gmail.com> 2021-08-14 23:17:07 +0200
committer Yury Norov <yury.norov@gmail.com> 2022-01-15 17:47:31 +0100
commit 7516be9931b8bc8bcaac8531f490b42ab11ded1e (patch)
tree 7437188f8ff4a29f05075c3932641aeec0b6d12f
parent include/linux: move for_each_bit() macros from bitops.h to find.h (diff)
find: micro-optimize for_each_{set,clear}_bit()
The macros iterate through all set/clear bits in a bitmap. They find the first bit using find_first_bit() and the remaining bits using find_next_bit(). Since find_next_bit() is called shortly after find_first_bit(), we can save a few lines of I-cache by not using find_first_bit() at all.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
Tested-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
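The equivalence the patch relies on is that find_next_bit(addr, size, 0) returns the same bit index as find_first_bit(addr, size), so the macro can start and continue the iteration through a single function. Below is a minimal userspace sketch of that contract; the function names mirror the kernel API, but the bodies are simplified illustrations, not the kernel's optimized implementations.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Simplified model: return the index of the first set bit at or after
 * 'offset', or 'size' if no set bit is found. */
static unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset)
{
	for (unsigned long bit = offset; bit < size; bit++)
		if (addr[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)))
			return bit;
	return size;
}

/* With offset 0, "find next" degenerates into "find first". */
static unsigned long find_first_bit(const unsigned long *addr,
				    unsigned long size)
{
	return find_next_bit(addr, size, 0);
}

int main(void)
{
	unsigned long map[1] = { 0x28UL };	/* bits 3 and 5 set */

	printf("first set bit: %lu\n", find_first_bit(map, BITS_PER_LONG));
	printf("next after 3:  %lu\n", find_next_bit(map, BITS_PER_LONG, 4));
	return 0;
}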
-rw-r--r-- include/linux/find.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/find.h b/include/linux/find.h
index 4500e8ab93e2..ae9ed52b52b8 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -280,7 +280,7 @@ unsigned long find_next_bit_le(const void *addr, unsigned
#endif
#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
+ for ((bit) = find_next_bit((addr), (size), 0); \
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
@@ -291,7 +291,7 @@ unsigned long find_next_bit_le(const void *addr, unsigned
(bit) = find_next_bit((addr), (size), (bit) + 1))
#define for_each_clear_bit(bit, addr, size) \
- for ((bit) = find_first_zero_bit((addr), (size)); \
+ for ((bit) = find_next_zero_bit((addr), (size), 0); \
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
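Callers of the macros are unaffected by the change; only the function that the first step of the expansion calls differs. A hypothetical caller, for illustration only (print_set_bits() is not part of the patch):

#include <linux/bitops.h>	/* pulls in linux/find.h for the iterators */
#include <linux/printk.h>

/* Hypothetical helper: log every set bit in 'mask'.  The macro's initial
 * step now expands to find_next_bit(mask, nbits, 0) instead of
 * find_first_bit(mask, nbits); the bits visited are identical. */
static void print_set_bits(const unsigned long *mask, unsigned long nbits)
{
	unsigned long bit;

	for_each_set_bit(bit, mask, nbits)
		pr_info("bit %lu is set\n", bit);
}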