/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)  READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
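
/*
 * Illustrative sketch (comment only, not generated code): ATOMIC_OP(add, +=, add)
 * above expands to roughly the following LL/SC retry loop, where llock()/scond()
 * stand in for the ARC instructions rather than real kernel helpers:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		do {
 *			val = llock(&v->counter);	// exclusive load
 *			val += i;			// the "add" asm_op
 *		} while (!scond(val, &v->counter));	// retry if exclusivity was lost
 *	}
 *
 * Note that only the *_return and fetch_* variants bracket the loop with
 * smp_mb(); the plain atomic_##op() helpers provide no ordering guarantee.
 */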

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

#endif
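
/*
 * Illustrative interleaving (assumption: the lock-based atomic_add() defined
 * below) showing why even the single-store SMP atomic_set() above must take
 * atomic_ops_lock():
 *
 *	CPU0				CPU1
 *	----				----
 *					atomic_ops_lock(flags);
 *					tmp = v->counter;
 *	atomic_set(v, 0);
 *					v->counter = tmp + 1;	// overwrites the set
 *					atomic_ops_unlock(flags);
 *
 * Without the lock on CPU0, the store lands in the middle of CPU1's emulated
 * read-modify-write and is silently lost; with the lock, the two operations
 * serialize one way or the other.
 */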

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot		atomic_andnot
#define atomic_fetch_andnot	atomic_fetch_andnot

#undef ATOMIC_OPS
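
/*
 * The two ATOMIC_OPS() instantiations above generate the add/sub family:
 * atomic_add(), atomic_sub(), atomic_add_return(), atomic_sub_return(),
 * atomic_fetch_add() and atomic_fetch_sub(). Illustrative use (nr_users and
 * cleanup() are hypothetical caller code, not part of this header):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_add(1, &nr_users);			// unordered
 *	if (atomic_sub_return(1, &nr_users) == 0)	// fully ordered
 *		cleanup();
 *
 * ATOMIC_OPS is redefined below without ATOMIC_OP_RETURN because the bitwise
 * ops only come in plain and fetch_ flavours (there is no atomic_and_return()
 * etc. in the atomic API).
 */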
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }

static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, s64 a)
{
	/*
	 * This could have been a simple assignment in "C" but would need an
	 * explicit volatile. Otherwise gcc optimizers could elide the store,
	 * which borked the atomic64 self-test.
	 * In the inline asm version, the memory clobber is needed for the
	 * exact same reason, to tell gcc about the store.
	 *
	 * This however is not needed for the sibling atomic64_add() etc.
	 * since both load/store are explicitly done in inline asm. As long
	 * as the API is used for each access, gcc has no way to optimize
	 * away any load/store.
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}
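
/*
 * In the 64-bit LL/SC macros below, %L<n> and %H<n> are GCC operand
 * modifiers selecting the low and high 32-bit word of a 64-bit register
 * pair. A rough C-level sketch of what ATOMIC64_OP(add, add.f, adc)
 * generates (illustrative only; llockd()/scondd() stand in for the ARCv2
 * instructions):
 *
 *	static inline void atomic64_add(s64 a, atomic64_t *v)
 *	{
 *		s64 val;
 *
 *		do {
 *			val = llockd(&v->counter);	// 64-bit exclusive load
 *			val += a;	// add.f on the low word, adc on the high word
 *		} while (!scondd(val, &v->counter));	// 64-bit store-conditional
 *	}
 */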

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	s64 val;							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	s64 val;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
{									\
	s64 val, orig;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd   %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)
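
/*
 * Instantiations below: 64-bit add/sub are built from two 32-bit
 * instructions, the ".f" form setting carry/borrow on the low word and
 * adc/sbc folding it into the high word. The bitwise ops need no flag
 * propagation, so the same instruction serves both halves.
 */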

#define atomic64_andnot		atomic64_andnot
#define atomic64_fetch_andnot	atomic64_fetch_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64
atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}
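
/*
 * Illustrative sketches (hypothetical caller code, not part of this header).
 *
 * atomic64_cmpxchg() above returns the previous value, so the usual retry
 * loop compares it against the expected value:
 *
 *	static inline void stat64_set_max(atomic64_t *stat, s64 new)
 *	{
 *		s64 old = atomic64_read(stat);
 *
 *		while (old < new) {
 *			s64 seen = atomic64_cmpxchg(stat, old, new);
 *			if (seen == old)	// no one raced with us
 *				break;
 *			old = seen;		// retry against the newer value
 *		}
 *	}
 *
 * The two helpers below combine into the classic "get a reference unless it
 * already dropped to zero" pattern:
 *
 *	static inline bool obj_get_unless_zero(atomic64_t *ref)
 *	{
 *		return atomic64_fetch_add_unless(ref, 1, 0) != 0;
 *	}
 *
 *	static inline bool obj_put(atomic64_t *ref)
 *	{
 *		// atomic64_dec_if_positive() returns old - 1 even when it does
 *		// not store, so 0 means the last reference was just dropped and
 *		// a negative value means the count was already 0.
 *		return atomic64_dec_if_positive(ref) == 0;
 *	}
 */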

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if it was not @u.
 * Returns the old value of @v
 */
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 old, temp;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"2:				\n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(old), "=&r" (temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif