#ifndef __SYS_ATOMIC_H__
#define __SYS_ATOMIC_H__

#include <ppu-types.h>

typedef struct { volatile u32 counter; } atomic_t;
typedef struct { volatile u64 counter; } atomic64_t;
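/*
 * Usage sketch (not part of the API): an atomic_t can back a simple
 * reference count. The names refcount, obj and object_free() below are
 * hypothetical, for illustration only.
 *
 *   atomic_t refcount = { 1 };                 // one initial reference
 *
 *   sysAtomicInc(&refcount);                   // take another reference
 *   if (sysAtomicDecAndTest(&refcount))        // drop one; true at zero
 *       object_free(obj);
 */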
static inline u32 sysAtomicRead(const atomic_t *v)
{
    u32 t;

    __asm__ volatile("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

    return t;
}
static inline void sysAtomicSet(atomic_t *v, int i)
{
    __asm__ volatile("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
static inline void sysAtomicAdd(u32 a, atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%3     # atomic_add\n\
    add     %0,%2,%0\n\
    stwcx.  %0,0,%3\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (a), "r" (&v->counter)
    : "cc");
}
static inline u32 sysAtomicAddReturn(u32 a, atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%2     # atomic_add_return\n\
    add     %0,%1,%0\n\
    stwcx.  %0,0,%2\n\
    bne-    1b\n\
    isync"
    : "=&r" (t)
    : "r" (a), "r" (&v->counter)
    : "cc", "memory");

    return t;
}
#define sysAtomicAddNegative(a, v)      (sysAtomicAddReturn((a), (v)) < 0)

static inline void sysAtomicSub(u32 a, atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%3     # atomic_sub\n\
    subf    %0,%2,%0\n\
    stwcx.  %0,0,%3\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (a), "r" (&v->counter)
    : "cc");
}
static inline u32 sysAtomicSubReturn(u32 a, atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%2     # atomic_sub_return\n\
    subf    %0,%1,%0\n\
    stwcx.  %0,0,%2\n\
    bne-    1b\n\
    isync"
    : "=&r" (t)
    : "r" (a), "r" (&v->counter)
    : "cc", "memory");

    return t;
}
static inline void sysAtomicInc(atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%2     # atomic_inc\n\
    addic   %0,%0,1\n\
    stwcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (&v->counter)
    : "cc", "xer");
}
static inline u32 sysAtomicIncReturn(atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%1     # atomic_inc_return\n\
    addic   %0,%0,1\n\
    stwcx.  %0,0,%1\n\
    bne-    1b\n\
    isync"
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "xer", "memory");

    return t;
}
#define sysAtomicIncAndTest(v)          (sysAtomicIncReturn(v) == 0)

static inline void sysAtomicDec(atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%2     # atomic_dec\n\
    addic   %0,%0,-1\n\
    stwcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (&v->counter)
    : "cc", "xer");
}
static inline u32 sysAtomicDecReturn(atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%1     # atomic_dec_return\n\
    addic   %0,%0,-1\n\
    stwcx.  %0,0,%1\n\
    bne-    1b\n\
    isync"
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "xer", "memory");

    return t;
}
static inline u32 __xchg_u32(volatile void *p, u32 val)
{
    u32 prev;

    __asm__ volatile(
"1: lwarx   %0,0,%2 \n"
"   stwcx.  %3,0,%2\n\
    bne-    1b\n\
    isync"
    : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
    : "r" (p), "r" (val)
    : "cc", "memory");

    return prev;
}
static inline u64 __xchg_u64(volatile void *p, u64 val)
{
    u64 prev;

    __asm__ volatile(
"1: ldarx   %0,0,%2 \n"
"   stdcx.  %3,0,%2\n\
    bne-    1b\n\
    isync"
    : "=&r" (prev), "+m" (*(volatile u64 *)p)
    : "r" (p), "r" (val)
    : "cc", "memory");

    return prev;
}
extern void __xchg_called_with_bad_pointer(void);

static inline u64 __xchg(volatile void *ptr, u64 x, unsigned int size)
{
    switch (size) {
    case 4:
        return __xchg_u32(ptr, x);
    case 8:
        return __xchg_u64(ptr, x);
    }
    __xchg_called_with_bad_pointer();
    return x;
}

#define xchg(ptr,x)                                                     \
  ({                                                                    \
     __typeof__(*(ptr)) _x_ = (x);                                      \
     (__typeof__(*(ptr))) __xchg((ptr), (u64)_x_, sizeof(*(ptr)));      \
  })

static inline u64
__cmpxchg_u32(volatile unsigned int *p, u64 old, u64 new)
{
    unsigned int prev;

    __asm__ volatile(
"1: lwarx   %0,0,%2     # __cmpxchg_u32\n\
    cmpw    0,%0,%3\n\
    bne-    2f\n\
    stwcx.  %4,0,%2\n\
    bne-    1b\n\
    isync\n\
2:"
    : "=&r" (prev), "+m" (*p)
    : "r" (p), "r" (old), "r" (new)
    : "cc", "memory");

    return prev;
}
static inline u64
__cmpxchg_u64(volatile u64 *p, u64 old, u64 new)
{
    u64 prev;

    __asm__ volatile(
"1: ldarx   %0,0,%2     # __cmpxchg_u64\n\
    cmpd    0,%0,%3\n\
    bne-    2f\n\
    stdcx.  %4,0,%2\n\
    bne-    1b\n\
    isync\n\
2:"
    : "=&r" (prev), "+m" (*p)
    : "r" (p), "r" (old), "r" (new)
    : "cc", "memory");

    return prev;
}
extern void __cmpxchg_called_with_bad_pointer(void);

static inline u64
__cmpxchg(volatile void *ptr, u64 old, u64 new, unsigned int size)
{
    switch (size) {
    case 4:
        return __cmpxchg_u32(ptr, old, new);
    case 8:
        return __cmpxchg_u64(ptr, old, new);
    }
    __cmpxchg_called_with_bad_pointer();
    return old;
}
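/*
 * __xchg() and __cmpxchg() dispatch on sizeof(*ptr). Any other size falls
 * through to the extern __*_called_with_bad_pointer() declarations, which
 * are intentionally left undefined so that a bad pointer type is caught at
 * link time instead of failing silently at run time.
 */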
#define cmpxchg(ptr, o, n)                                              \
  ({                                                                    \
     __typeof__(*(ptr)) _o_ = (o);                                      \
     __typeof__(*(ptr)) _n_ = (n);                                      \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (u64)_o_,                    \
                                    (u64)_n_, sizeof(*(ptr)));          \
  })

#define sysAtomicCompareAndSwap(v, o, n)    (cmpxchg(&((v)->counter), (o), (n)))
#define sysAtomicSwap(v, new)               (xchg(&((v)->counter), new))

static inline u32 sysAtomicAddUnless(atomic_t *v, u32 a, int u)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%1     # atomic_add_unless\n\
    cmpw    0,%0,%3\n\
    beq-    2f\n\
    add     %0,%2,%0\n\
    stwcx.  %0,0,%1\n\
    bne-    1b\n\
    isync\n\
    subf    %0,%2,%0\n\
2:"
    : "=&r" (t)
    : "r" (&v->counter), "r" (a), "r" (u)
    : "cc", "memory");

    return t != u;
}
#define sysAtomicIncNotZero(v)          sysAtomicAddUnless((v), 1, 0)

#define sysAtomicSubAndTest(a, v)       (sysAtomicSubReturn((a), (v)) == 0)
#define sysAtomicDecAndTest(v)          (sysAtomicDecReturn((v)) == 0)

static inline u32 sysAtomicDecIfPositive(atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%1     # atomic_dec_if_positive\n\
    cmpwi   %0,1\n\
    addi    %0,%0,-1\n\
    blt-    2f\n\
    stwcx.  %0,0,%1\n\
    bne-    1b\n\
    isync\n\
2:"
    : "=&b" (t)
    : "r" (&v->counter)
    : "cc", "memory");

    return t;
}

static inline u64 sysAtomic64Read(const atomic64_t *v)
{
    u64 t;

    __asm__ volatile("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

    return t;
}
static inline void sysAtomic64Set(atomic64_t *v, u64 i)
{
    __asm__ volatile("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
static inline void sysAtomic64Add(u64 a, atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%3     # atomic64_add\n\
    add     %0,%2,%0\n\
    stdcx.  %0,0,%3\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (a), "r" (&v->counter)
    : "cc");
}
static inline u64 sysAtomic64AddReturn(u64 a, atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%2     # atomic64_add_return\n\
    add     %0,%1,%0\n\
    stdcx.  %0,0,%2\n\
    bne-    1b\n\
    isync"
    : "=&r" (t)
    : "r" (a), "r" (&v->counter)
    : "cc", "memory");

    return t;
}
#define sysAtomic64AddNegative(a, v)    (sysAtomic64AddReturn((a), (v)) < 0)

static inline void sysAtomic64Sub(u64 a, atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%3     # atomic64_sub\n\
    subf    %0,%2,%0\n\
    stdcx.  %0,0,%3\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (a), "r" (&v->counter)
    : "cc");
}
static inline u64 sysAtomic64SubReturn(u64 a, atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%2     # atomic64_sub_return\n\
    subf    %0,%1,%0\n\
    stdcx.  %0,0,%2\n\
    bne-    1b\n\
    isync"
    : "=&r" (t)
    : "r" (a), "r" (&v->counter)
    : "cc", "memory");

    return t;
}
static inline void sysAtomic64Inc(atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%2     # atomic64_inc\n\
    addic   %0,%0,1\n\
    stdcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (&v->counter)
    : "cc", "xer");
}
static inline u64 sysAtomic64IncReturn(atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%1     # atomic64_inc_return\n\
    addic   %0,%0,1\n\
    stdcx.  %0,0,%1\n\
    bne-    1b\n\
    isync"
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "xer", "memory");

    return t;
}
#define sysAtomic64IncAndTest(v)        (sysAtomic64IncReturn(v) == 0)

static inline void sysAtomic64Dec(atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%2     # atomic64_dec\n\
    addic   %0,%0,-1\n\
    stdcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (&v->counter)
    : "cc", "xer");
}
static inline u64 sysAtomic64DecReturn(atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%1     # atomic64_dec_return\n\
    addic   %0,%0,-1\n\
    stdcx.  %0,0,%1\n\
    bne-    1b\n\
    isync"
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "xer", "memory");

    return t;
}
#define sysAtomic64SubAndTest(a, v)     (sysAtomic64SubReturn((a), (v)) == 0)
#define sysAtomic64DecAndTest(v)        (sysAtomic64DecReturn((v)) == 0)

static inline u64 sysAtomic64DecIfPositive(atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%1     # atomic64_dec_if_positive\n\
    addic.  %0,%0,-1\n\
    blt-    2f\n\
    stdcx.  %0,0,%1\n\
    bne-    1b\n\
    isync\n\
2:"
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "xer", "memory");

    return t;
}
#define sysAtomic64CompareAndSwap(v, o, n)  (cmpxchg(&((v)->counter), (o), (n)))
#define sysAtomic64Swap(v, new)             (xchg(&((v)->counter), new))

static inline u64 sysAtomic64AddUnless(atomic64_t *v, u64 a, u64 u)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%1     # atomic_add_unless\n\
    cmpd    0,%0,%3\n\
    beq-    2f\n\
    add     %0,%2,%0\n\
    stdcx.  %0,0,%1\n\
    bne-    1b\n\
    isync\n\
    subf    %0,%2,%0\n\
2:"
    : "=&r" (t)
    : "r" (&v->counter), "r" (a), "r" (u)
    : "cc", "memory");

    return t != u;
}

#define sysAtomic64IncNotZero(v)        sysAtomic64AddUnless((v), 1, 0)
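/*
 * Usage sketch (hypothetical): taking a reference only while an object is
 * still live. sysAtomic64IncNotZero() never resurrects a counter that has
 * already dropped to zero.
 *
 *   if (sysAtomic64IncNotZero(&obj->refs)) {
 *       use(obj);                              // reference held
 *       if (sysAtomic64DecAndTest(&obj->refs))
 *           destroy(obj);
 *   }
 */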
#endif /* __SYS_ATOMIC_H__ */