From d256545b2fd62d78910efcc6273c3b70abd3aa13 Mon Sep 17 00:00:00 2001
From: laforge
Date: Tue, 12 Sep 2006 17:35:30 +0000
Subject: move to new directory

git-svn-id: https://svn.openpcd.org:2342/trunk@191 6dc7ffe9-61d6-0310-9af1-9938baff3ed1
---
 firmware/include/asm/atomic.h | 106 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)
 create mode 100644 firmware/include/asm/atomic.h
(limited to 'firmware/include/asm/atomic.h')

diff --git a/firmware/include/asm/atomic.h b/firmware/include/asm/atomic.h
new file mode 100644
index 0000000..19e8ce6
--- /dev/null
+++ b/firmware/include/asm/atomic.h
@@ -0,0 +1,106 @@
+/*
+ * linux/include/asm-arm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_ATOMIC_H
+#define __ASM_ARM_ATOMIC_H
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#define atomic_read(v)	((v)->counter)
+
+#include <linux/compiler.h>
+#include <asm/system.h>
+
+#define atomic_set(v,i)	(((v)->counter) = (i))
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	local_irq_save(flags);
+	val = v->counter;
+	v->counter = val += i;
+	local_irq_restore(flags);
+
+	return val;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	local_irq_save(flags);
+	val = v->counter;
+	v->counter = val -= i;
+	local_irq_restore(flags);
+
+	return val;
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*addr &= ~mask;
+	local_irq_restore(flags);
+}
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+		c = old;
+	return c != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_add(i, v)	(void) atomic_add_return(i, v)
+#define atomic_inc(v)		(void) atomic_add_return(1, v)
+#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
+#define atomic_dec(v)		(void) atomic_sub_return(1, v)
+
+#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)	(atomic_add_return(1, v))
+#define atomic_dec_return(v)	(atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+/* Atomic operations are already serializing on ARM */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#endif
--
cgit v1.2.3
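
The header added above implements the Linux-style atomic_t API for the single-core ARM7 target by masking interrupts around each read-modify-write sequence. A minimal usage sketch, assuming a hypothetical tx_pending counter of queued frames (tx_pending, tx_enqueue and tx_done_irq are illustrative names, not part of this commit):

#include <asm/atomic.h>

/* Hypothetical counter of frames queued for transmission (illustration only). */
static atomic_t tx_pending = ATOMIC_INIT(0);

static void tx_enqueue(void)
{
	atomic_inc(&tx_pending);	/* one more frame queued */
}

static void tx_done_irq(void)
{
	/* atomic_dec_and_test() is non-zero once the counter reaches zero */
	if (atomic_dec_and_test(&tx_pending)) {
		/* last outstanding frame completed; TX path is idle */
	}
}

Because the operations are built on local_irq_save()/local_irq_restore(), they are safe between main-loop code and interrupt handlers on this uniprocessor MCU; the smp_mb__* macros are plain compiler barriers and give no SMP ordering guarantees.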