/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations to
 * operate on hw-defined data-structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);	/* 32-bit word holding bit @nr */
	bit = 1 << (nr & 31);				/* mask for @nr within that word */
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
		/* retry until no other CPU modified *m between the load and the cmpxchg */
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * arch___set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);	/* 32-bit word holding bit @nr */
	mask = ~(1 << (nr & 31));			/* all bits set except @nr */
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
		/* cmpxchg_rel (not _acq): release semantics order prior stores before the unlock */
	} while (cmpxchg_rel(m, old, new) != old);
}

/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics. See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
	__u32 * const m = (__u32 *) addr + (nr >> 5);
	__u32 const new = *m & ~(1 << (nr & 31));

	/* plain read-modify then a single st4.rel store: no cmpxchg loop needed
	   because callers guarantee exclusive ownership (the "__" contract) */
	ia64_st4_rel_nta(m, new);
}

/**
 * arch___clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * arch___change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;	/* previous value of the bit */
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on ia64
 */
#define test_and_set_bit_lock test_and_set_bit

/**
 * arch___test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;	/* ~mask re-isolates the single tested bit */
}

/**
 * arch___test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * arch___test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	/* x & (~x - 1) keeps exactly the run of 1-bits below the first 0-bit,
	   so the population count equals that bit's index */
	result = ia64_popcnt(x & (~x - 1));
	return result;
}

/**
 * __ffs - find first bit in word.
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	/* (x-1) & ~x sets exactly the bits below the least-significant 1-bit,
	   so the population count equals that bit's index */
	result = ia64_popcnt((x-1) & ~x);
	return result;
}

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	/* converting to long double normalizes x; the biased exponent then
	   encodes floor(log2(x)) */
	exp = ia64_getf_exp(d);
	return exp - 0xffff;	/* subtract the extended-precision exponent bias */
}

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int fls(unsigned int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	/* smear the highest set bit into every lower position ... */
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	/* ... so the population count is the 1-based index of that bit */
	return ia64_popcnt(x);
}

/*
 * Find the last (most significant) bit set.  Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
	/* same smear-then-popcnt trick as fls(), extended to 64 bits;
	   result is 0-based, hence the trailing -1 */
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	return ia64_popcnt(x) - 1;
}

#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);	/* single hardware population-count instruction */
	return result;
}

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#include <asm-generic/bitops/non-instrumented-non-atomic.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */