Diffstat (limited to 'firmware/include/asm')
-rw-r--r--   firmware/include/asm/assembler.h |  97
-rw-r--r--   firmware/include/asm/atomic.h    | 106
-rw-r--r--   firmware/include/asm/bitops.h    | 225
-rw-r--r--   firmware/include/asm/compiler.h  |   7
-rw-r--r--   firmware/include/asm/ctype.h     |  54
-rw-r--r--   firmware/include/asm/div64.h     |  48
-rw-r--r--   firmware/include/asm/linkage.h   |  18
-rw-r--r--   firmware/include/asm/ptrace.h    | 128
-rw-r--r--   firmware/include/asm/system.h    | 109
9 files changed, 792 insertions, 0 deletions
diff --git a/firmware/include/asm/assembler.h b/firmware/include/asm/assembler.h
new file mode 100644
index 0000000..b43f9d1
--- /dev/null
+++ b/firmware/include/asm/assembler.h
@@ -0,0 +1,97 @@
+/*
+ * linux/include/asm-arm/assembler.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains arm architecture specific defines
+ * for the different processors.
+ *
+ * Do not include any C declarations in this file - it is included by
+ * assembler source.
+ */
+#ifndef __ASSEMBLY__
+#error "Only include this from assembly code"
+#endif
+
+#include <asm/ptrace.h>
+
+#define pull            lsl
+#define push            lsr
+#define get_byte_0      lsr #24
+#define get_byte_1      lsr #16
+#define get_byte_2      lsr #8
+#define get_byte_3      lsl #0
+#define put_byte_0      lsl #24
+#define put_byte_1      lsl #16
+#define put_byte_2      lsl #8
+#define put_byte_3      lsl #0
+
+#define PLD(code...)
+
+#define MODE_USR        USR_MODE
+#define MODE_FIQ        FIQ_MODE
+#define MODE_IRQ        IRQ_MODE
+#define MODE_SVC        SVC_MODE
+
+#define DEFAULT_FIQ     MODE_FIQ
+
+/*
+ * LOADREGS - ldm with PC in register list (eg, ldmfd sp!, {pc})
+ */
+#ifdef __STDC__
+#define LOADREGS(cond, base, reglist...)\
+        ldm##cond       base,reglist
+#else
+#define LOADREGS(cond, base, reglist...)\
+        ldm/**/cond     base,reglist
+#endif
+
+/*
+ * Build a return instruction for this processor type.
+ */
+#define RETINSTR(instr, regs...)\
+        instr   regs
+
+/*
+ * Enable and disable interrupts
+ */
+        .macro  disable_irq
+        msr     cpsr_c, #PSR_I_BIT | SVC_MODE
+        .endm
+
+        .macro  enable_irq
+        msr     cpsr_c, #SVC_MODE
+        .endm
+
+/*
+ * Save the current IRQ state and disable IRQs.  Note that this macro
+ * assumes FIQs are enabled, and that the processor is in SVC mode.
+ */
+        .macro  save_and_disable_irqs, oldcpsr
+        mrs     \oldcpsr, cpsr
+        disable_irq
+        .endm
+
+/*
+ * Restore interrupt state previously stored in a register.  We don't
+ * guarantee that this will preserve the flags.
+ */
+        .macro  restore_irqs, oldcpsr
+        msr     cpsr_c, \oldcpsr
+        .endm
+
+/*
+ * These two are used to save LR/restore PC over a user-based access.
+ * The old 26-bit architecture requires that we do.  On 32-bit
+ * architecture, we can safely ignore this requirement.
+ */
+        .macro  save_lr
+        .endm
+
+        .macro  restore_pc
+        mov     pc, lr
+        .endm
diff --git a/firmware/include/asm/atomic.h b/firmware/include/asm/atomic.h
new file mode 100644
index 0000000..19e8ce6
--- /dev/null
+++ b/firmware/include/asm/atomic.h
@@ -0,0 +1,106 @@
+/*
+ * linux/include/asm-arm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_ATOMIC_H
+#define __ASM_ARM_ATOMIC_H
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)  { (i) }
+
+#define atomic_read(v)  ((v)->counter)
+
+#include <asm/system.h>
+#include <asm/compiler.h>
+
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+        unsigned long flags;
+        int val;
+
+        local_irq_save(flags);
+        val = v->counter;
+        v->counter = val += i;
+        local_irq_restore(flags);
+
+        return val;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+        unsigned long flags;
+        int val;
+
+        local_irq_save(flags);
+        val = v->counter;
+        v->counter = val -= i;
+        local_irq_restore(flags);
+
+        return val;
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+        int ret;
+        unsigned long flags;
+
+        local_irq_save(flags);
+        ret = v->counter;
+        if (likely(ret == old))
+                v->counter = new;
+        local_irq_restore(flags);
+
+        return ret;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *addr &= ~mask;
+        local_irq_restore(flags);
+}
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+
+        c = atomic_read(v);
+        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+                c = old;
+        return c != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_add(i, v)        (void) atomic_add_return(i, v)
+#define atomic_inc(v)           (void) atomic_add_return(1, v)
+#define atomic_sub(i, v)        (void) atomic_sub_return(i, v)
+#define atomic_dec(v)           (void) atomic_sub_return(1, v)
+
+#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+/* Atomic operations are already serializing on ARM */
+#define smp_mb__before_atomic_dec()     barrier()
+#define smp_mb__after_atomic_dec()      barrier()
+#define smp_mb__before_atomic_inc()     barrier()
+#define smp_mb__after_atomic_inc()      barrier()
+
+#endif
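These atomics serialize by masking IRQs around a plain read-modify-write rather than with exclusive loads, which is sufficient on a single ARM7TDMI core. For orientation, a minimal sketch of the usual reference-count pattern built on this API; struct buffer and the get/put helpers are hypothetical and not part of this commit:

#include <asm/atomic.h>

/* Hypothetical reference-counted object (illustrative only). */
struct buffer {
        atomic_t refcount;
};

static void buffer_get(struct buffer *b)
{
        atomic_inc(&b->refcount);       /* IRQs are masked around the RMW */
}

/* Returns non-zero when the last reference is dropped. */
static int buffer_put(struct buffer *b)
{
        return atomic_dec_and_test(&b->refcount);
}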
diff --git a/firmware/include/asm/bitops.h b/firmware/include/asm/bitops.h
new file mode 100644
index 0000000..337d800
--- /dev/null
+++ b/firmware/include/asm/bitops.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright 1995, Russell King.
+ * Various bits and pieces copyrights include:
+ *  Linus Torvalds (test_bit).
+ * Big endian support: Copyright 2001, Nicolas Pitre
+ *  reworked by rmk.
+ *
+ * bit 0 is the LSB of an "unsigned long" quantity.
+ *
+ * Please note that the code in this file should never be included
+ * from user space.  Many of these are not implemented in assembler
+ * since they would be too costly.  Also, they require privileged
+ * instructions (which are not available from user mode) to ensure
+ * that they are atomic.
+ */
+
+#ifndef __ASM_ARM_BITOPS_H
+#define __ASM_ARM_BITOPS_H
+
+#include <asm/system.h>
+
+#define smp_mb__before_clear_bit()      mb()
+#define smp_mb__after_clear_bit()       mb()
+
+/*
+ * These functions are the basis of our bit ops.
+ *
+ * First, the atomic bitops. These use native endian.
+ */
+static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long flags;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        local_irq_save(flags);
+        *p |= mask;
+        local_irq_restore(flags);
+}
+
+static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long flags;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        local_irq_save(flags);
+        *p &= ~mask;
+        local_irq_restore(flags);
+}
+
+static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long flags;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        local_irq_save(flags);
+        *p ^= mask;
+        local_irq_restore(flags);
+}
+
+static inline int
+____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long flags;
+        unsigned int res;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        local_irq_save(flags);
+        res = *p;
+        *p = res | mask;
+        local_irq_restore(flags);
+
+        return res & mask;
+}
+
+static inline int
+____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long flags;
+        unsigned int res;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        local_irq_save(flags);
+        res = *p;
+        *p = res & ~mask;
+        local_irq_restore(flags);
+
+        return res & mask;
+}
+
+static inline int
+____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+        unsigned long flags;
+        unsigned int res;
+        unsigned long mask = 1UL << (bit & 31);
+
+        p += bit >> 5;
+
+        local_irq_save(flags);
+        res = *p;
+        *p = res ^ mask;
+        local_irq_restore(flags);
+
+        return res & mask;
+}
+
+//#include <asm-generic/bitops/non-atomic.h>
+
+/*
+ * A note about Endian-ness.
+ * -------------------------
+ *
+ * When the ARM is put into big endian mode via CR15, the processor
+ * merely swaps the order of bytes within words, thus:
+ *
+ *          ------------ physical data bus bits -----------
+ *          D31 ... D24  D23 ... D16  D15 ... D8  D7 ... D0
+ * little     byte 3       byte 2       byte 1      byte 0
+ * big        byte 0       byte 1       byte 2      byte 3
+ *
+ * This means that reading a 32-bit word at address 0 returns the same
+ * value irrespective of the endian mode bit.
+ *
+ * Peripheral devices should be connected with the data bus reversed in
+ * "Big Endian" mode.  ARM Application Note 61 is applicable, and is
+ * available from http://www.arm.com/.
+ *
+ * The following assumes that the data bus connectivity for big endian
+ * mode has been followed.
+ *
+ * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
+ */
+
+/*
+ * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
+ */
+extern void _set_bit_le(int nr, volatile unsigned long * p);
+extern void _clear_bit_le(int nr, volatile unsigned long * p);
+extern void _change_bit_le(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
+extern int _find_first_zero_bit_le(const void * p, unsigned size);
+extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+
+/*
+ * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
+ */
+extern void _set_bit_be(int nr, volatile unsigned long * p);
+extern void _clear_bit_be(int nr, volatile unsigned long * p);
+extern void _change_bit_be(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
+extern int _find_first_zero_bit_be(const void * p, unsigned size);
+extern int _find_next_zero_bit_be(const void * p, int size, int offset);
+extern int _find_first_bit_be(const unsigned long *p, unsigned size);
+extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+
+/*
+ * The __* form of bitops are non-atomic and may be reordered.
+ */
+#define ATOMIC_BITOP_LE(name,nr,p)              \
+        (__builtin_constant_p(nr) ?             \
+         ____atomic_##name(nr, p) :             \
+         _##name##_le(nr,p))
+
+#define ATOMIC_BITOP_BE(name,nr,p)              \
+        (__builtin_constant_p(nr) ?             \
+         ____atomic_##name(nr, p) :             \
+         _##name##_be(nr,p))
+
+#define NONATOMIC_BITOP(name,nr,p)              \
+        (____nonatomic_##name(nr, p))
+
+/*
+ * These are the little endian, atomic definitions.
+ */
+#define set_bit(nr,p)                   ATOMIC_BITOP_LE(set_bit,nr,p)
+#define clear_bit(nr,p)                 ATOMIC_BITOP_LE(clear_bit,nr,p)
+#define change_bit(nr,p)                ATOMIC_BITOP_LE(change_bit,nr,p)
+#define test_and_set_bit(nr,p)          ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p)        ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p)       ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
+#define find_first_zero_bit(p,sz)       _find_first_zero_bit_le(p,sz)
+#define find_next_zero_bit(p,sz,off)    _find_next_zero_bit_le(p,sz,off)
+#define find_first_bit(p,sz)            _find_first_bit_le(p,sz)
+#define find_next_bit(p,sz,off)         _find_next_bit_le(p,sz,off)
+
+#define WORD_BITOFF_TO_LE(x)            ((x))
+
+#if 0
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/ffs.h>
+
+#include <asm-generic/bitops/fls64.h>
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#endif
+
+#define BITS_PER_LONG 32
+#define BITOP_MASK(nr)          (1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr)          ((nr) / BITS_PER_LONG)
+
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+        return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+#endif /* _ARM_BITOPS_H */
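Callers normally go through the generic names, which dispatch to the inline IRQ-masking helpers for constant bit numbers and to the out-of-line _*_le assembly routines (expected to live elsewhere in this tree) otherwise. A small usage sketch; the pending bitmap and both helpers are illustrative, not part of this commit:

#include <asm/bitops.h>

#define NR_SOURCES 64

/* Hypothetical pending-source bitmap: two 32-bit words on this target. */
static unsigned long pending[NR_SOURCES / BITS_PER_LONG];

static void mark_pending(int src)
{
        set_bit(src, pending);          /* atomic via IRQ masking */
}

/* Returns the first pending source and clears it, or NR_SOURCES if none. */
static int take_pending(void)
{
        int src = find_first_bit(pending, NR_SOURCES);

        if (src < NR_SOURCES)
                clear_bit(src, pending);
        return src;
}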
diff --git a/firmware/include/asm/compiler.h b/firmware/include/asm/compiler.h
new file mode 100644
index 0000000..de4dfaa
--- /dev/null
+++ b/firmware/include/asm/compiler.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_COMPILER_H
+#define _ASM_COMPILER_H
+
+#define likely(x)       __builtin_expect(!!(x), 1)
+#define unlikely(x)     __builtin_expect(!!(x), 0)
+
+#endif
diff --git a/firmware/include/asm/ctype.h b/firmware/include/asm/ctype.h
new file mode 100644
index 0000000..afa3639
--- /dev/null
+++ b/firmware/include/asm/ctype.h
@@ -0,0 +1,54 @@
+#ifndef _LINUX_CTYPE_H
+#define _LINUX_CTYPE_H
+
+/*
+ * NOTE! This ctype does not handle EOF like the standard C
+ * library is required to.
+ */
+
+#define _U      0x01    /* upper */
+#define _L      0x02    /* lower */
+#define _D      0x04    /* digit */
+#define _C      0x08    /* cntrl */
+#define _P      0x10    /* punct */
+#define _S      0x20    /* white space (space/lf/tab) */
+#define _X      0x40    /* hex digit */
+#define _SP     0x80    /* hard space (0x20) */
+
+extern unsigned char _ctype[];
+
+#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
+
+#define isalnum(c)      ((__ismask(c)&(_U|_L|_D)) != 0)
+#define isalpha(c)      ((__ismask(c)&(_U|_L)) != 0)
+#define iscntrl(c)      ((__ismask(c)&(_C)) != 0)
+#define isdigit(c)      ((__ismask(c)&(_D)) != 0)
+#define isgraph(c)      ((__ismask(c)&(_P|_U|_L|_D)) != 0)
+#define islower(c)      ((__ismask(c)&(_L)) != 0)
+#define isprint(c)      ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
+#define ispunct(c)      ((__ismask(c)&(_P)) != 0)
+#define isspace(c)      ((__ismask(c)&(_S)) != 0)
+#define isupper(c)      ((__ismask(c)&(_U)) != 0)
+#define isxdigit(c)     ((__ismask(c)&(_D|_X)) != 0)
+
+#define isascii(c) (((unsigned char)(c))<=0x7f)
+#define toascii(c) (((unsigned char)(c))&0x7f)
+
+static inline unsigned char __tolower(unsigned char c)
+{
+        if (isupper(c))
+                c -= 'A'-'a';
+        return c;
+}
+
+static inline unsigned char __toupper(unsigned char c)
+{
+        if (islower(c))
+                c -= 'a'-'A';
+        return c;
+}
+
+#define tolower(c) __tolower(c)
+#define toupper(c) __toupper(c)
+
+#endif
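Because these classifiers index the extern _ctype[] table (which must be defined somewhere else in the firmware) instead of calling into a C library, they are usable from any context. A hedged sketch of a typical consumer; parse_uint is hypothetical, not part of this commit:

#include <asm/ctype.h>

/* Illustrative only: parse an unsigned decimal number from a string
 * using the table-driven classifiers above. */
static unsigned int parse_uint(const char *s)
{
        unsigned int n = 0;

        while (isspace(*s))             /* _S: space/lf/tab */
                s++;
        while (isdigit(*s))             /* _D: '0'..'9' */
                n = n * 10 + (*s++ - '0');
        return n;
}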
diff --git a/firmware/include/asm/div64.h b/firmware/include/asm/div64.h
new file mode 100644
index 0000000..3682616
--- /dev/null
+++ b/firmware/include/asm/div64.h
@@ -0,0 +1,48 @@
+#ifndef __ASM_ARM_DIV64
+#define __ASM_ARM_DIV64
+
+#include <asm/system.h>
+
+/*
+ * The semantics of do_div() are:
+ *
+ * uint32_t do_div(uint64_t *n, uint32_t base)
+ * {
+ *      uint32_t remainder = *n % base;
+ *      *n = *n / base;
+ *      return remainder;
+ * }
+ *
+ * In other words, a 64-bit dividend with a 32-bit divisor producing
+ * a 64-bit result and a 32-bit remainder.  To accomplish this optimally
+ * we call a special __do_div64 helper with completely non standard
+ * calling convention for arguments and results (beware).
+ */
+
+#ifdef __ARMEB__
+#define __xh "r0"
+#define __xl "r1"
+#else
+#define __xl "r0"
+#define __xh "r1"
+#endif
+
+#define do_div(n,base)                                          \
+({                                                              \
+        register unsigned int __base      asm("r4") = base;     \
+        register unsigned long long __n   asm("r0") = n;        \
+        register unsigned long long __res asm("r2");            \
+        register unsigned int __rem       asm(__xh);            \
+        asm(    __asmeq("%0", __xh)                             \
+                __asmeq("%1", "r2")                             \
+                __asmeq("%2", "r0")                             \
+                __asmeq("%3", "r4")                             \
+                "bl     __do_div64"                             \
+                : "=r" (__rem), "=r" (__res)                    \
+                : "r" (__n), "r" (__base)                       \
+                : "ip", "lr", "cc");                            \
+        n = __res;                                              \
+        __rem;                                                  \
+})
+
+#endif
diff --git a/firmware/include/asm/linkage.h b/firmware/include/asm/linkage.h
new file mode 100644
index 0000000..ac1c900
--- /dev/null
+++ b/firmware/include/asm/linkage.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+/* asm-arm/linkage.h */
+
+#define __ALIGN .align 0
+#define __ALIGN_STR ".align 0"
+
+/* linux/linkage.h */
+
+#define ALIGN __ALIGN
+
+#define ENTRY(name) \
+  .globl name; \
+  ALIGN; \
+  name:
+
+#endif
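Note that do_div() reads and reassigns n, so the dividend must be an lvalue, and the __do_div64 helper must be provided at link time. A sketch under assumed numbers (a 32.768 kHz timebase; ticks_to_ms is illustrative, not part of this commit):

#include <asm/div64.h>

/* Convert a 64-bit tick count to milliseconds, assuming the value
 * scaled by 1000 still fits in 64 bits. */
static unsigned int ticks_to_ms(unsigned long long ticks)
{
        unsigned int rem;

        ticks *= 1000;                  /* scale first to keep precision */
        rem = do_div(ticks, 32768);     /* ticks now holds the quotient */
        (void)rem;                      /* 32-bit remainder, if needed */
        return (unsigned int)ticks;
}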
diff --git a/firmware/include/asm/ptrace.h b/firmware/include/asm/ptrace.h
new file mode 100644
index 0000000..f3a654e
--- /dev/null
+++ b/firmware/include/asm/ptrace.h
@@ -0,0 +1,128 @@
+/*
+ * linux/include/asm-arm/ptrace.h
+ *
+ * Copyright (C) 1996-2003 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_PTRACE_H
+#define __ASM_ARM_PTRACE_H
+
+/*
+ * PSR bits
+ */
+#define USR26_MODE      0x00000000
+#define FIQ26_MODE      0x00000001
+#define IRQ26_MODE      0x00000002
+#define SVC26_MODE      0x00000003
+#define USR_MODE        0x00000010
+#define FIQ_MODE        0x00000011
+#define IRQ_MODE        0x00000012
+#define SVC_MODE        0x00000013
+#define ABT_MODE        0x00000017
+#define UND_MODE        0x0000001b
+#define SYSTEM_MODE     0x0000001f
+#define MODE32_BIT      0x00000010
+#define MODE_MASK       0x0000001f
+#define PSR_T_BIT       0x00000020
+#define PSR_F_BIT       0x00000040
+#define PSR_I_BIT       0x00000080
+#define PSR_J_BIT       0x01000000
+#define PSR_Q_BIT       0x08000000
+#define PSR_V_BIT       0x10000000
+#define PSR_C_BIT       0x20000000
+#define PSR_Z_BIT       0x40000000
+#define PSR_N_BIT       0x80000000
+#define PCMASK          0
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f           0xff000000      /* Flags      */
+#define PSR_s           0x00ff0000      /* Status     */
+#define PSR_x           0x0000ff00      /* Extension  */
+#define PSR_c           0x000000ff      /* Control    */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the
+ * stack during a system call.  Note that sizeof(struct pt_regs)
+ * has to be a multiple of 8.
+ */
+struct pt_regs {
+        long uregs[18];
+};
+
+#define ARM_cpsr        uregs[16]
+#define ARM_pc          uregs[15]
+#define ARM_lr          uregs[14]
+#define ARM_sp          uregs[13]
+#define ARM_ip          uregs[12]
+#define ARM_fp          uregs[11]
+#define ARM_r10         uregs[10]
+#define ARM_r9          uregs[9]
+#define ARM_r8          uregs[8]
+#define ARM_r7          uregs[7]
+#define ARM_r6          uregs[6]
+#define ARM_r5          uregs[5]
+#define ARM_r4          uregs[4]
+#define ARM_r3          uregs[3]
+#define ARM_r2          uregs[2]
+#define ARM_r1          uregs[1]
+#define ARM_r0          uregs[0]
+#define ARM_ORIG_r0     uregs[17]
+
+#define user_mode(regs) \
+        (((regs)->ARM_cpsr & 0xf) == 0)
+
+#ifdef CONFIG_ARM_THUMB
+#define thumb_mode(regs) \
+        (((regs)->ARM_cpsr & PSR_T_BIT))
+#else
+#define thumb_mode(regs) (0)
+#endif
+
+#define processor_mode(regs) \
+        ((regs)->ARM_cpsr & MODE_MASK)
+
+#define interrupts_enabled(regs) \
+        (!((regs)->ARM_cpsr & PSR_I_BIT))
+
+#define fast_interrupts_enabled(regs) \
+        (!((regs)->ARM_cpsr & PSR_F_BIT))
+
+#define condition_codes(regs) \
+        ((regs)->ARM_cpsr & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT))
+
+/* Are the current registers suitable for user mode?
+ * (used to maintain security in signal handlers)
+ */
+static inline int valid_user_regs(struct pt_regs *regs)
+{
+        if (user_mode(regs) &&
+            (regs->ARM_cpsr & (PSR_F_BIT|PSR_I_BIT)) == 0)
+                return 1;
+
+        /*
+         * Force CPSR to something logical...
+         */
+        regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
+
+        return 0;
+}
+
+#define pc_pointer(v) \
+        ((v) & ~PCMASK)
+
+#define instruction_pointer(regs) \
+        (pc_pointer((regs)->ARM_pc))
+
+#define profile_pc(regs) instruction_pointer(regs)
+
+#endif /* __ASSEMBLY__ */
+
+#endif
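A short sketch of how an exception handler might consume a saved register frame through these accessors; report_fault and the log_printf logger are hypothetical stand-ins, not part of this commit:

#include <asm/ptrace.h>

/* Assumes a printf-style logger exists elsewhere in the firmware. */
extern void log_printf(const char *fmt, ...);

static void report_fault(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);
        long mode = processor_mode(regs);

        log_printf("fault at %08lx, mode %02lx, %s, IRQs %s\n",
                   pc, mode,
                   user_mode(regs) ? "user" : "kernel",
                   interrupts_enabled(regs) ? "on" : "off");
}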
diff --git a/firmware/include/asm/system.h b/firmware/include/asm/system.h
new file mode 100644
index 0000000..2bf0cc5
--- /dev/null
+++ b/firmware/include/asm/system.h
@@ -0,0 +1,109 @@
+#ifndef __ASM_ARM_SYSTEM_H
+#define __ASM_ARM_SYSTEM_H
+
+/* Generic ARM7TDMI (ARMv4T) synchronisation primitives, mostly
+ * taken from Linux kernel source, licensed under GPL */
+
+#define local_irq_save(x)                                       \
+        ({                                                      \
+                unsigned long temp;                             \
+                (void) (&temp == &x);                           \
+        __asm__ __volatile__(                                   \
+        "mrs    %0, cpsr                @ local_irq_save\n"     \
+"       orr     %1, %0, #128\n"                                 \
+"       msr     cpsr_c, %1"                                     \
+        : "=r" (x), "=r" (temp)                                 \
+        :                                                       \
+        : "memory", "cc");                                      \
+        })
+
+/*
+ * Enable IRQs
+ */
+#define local_irq_enable()                                      \
+        ({                                                      \
+                unsigned long temp;                             \
+        __asm__ __volatile__(                                   \
+        "mrs    %0, cpsr                @ local_irq_enable\n"   \
+"       bic     %0, %0, #128\n"                                 \
+"       msr     cpsr_c, %0"                                     \
+        : "=r" (temp)                                           \
+        :                                                       \
+        : "memory", "cc");                                      \
+        })
+
+/*
+ * Disable IRQs
+ */
+#define local_irq_disable()                                     \
+        ({                                                      \
+                unsigned long temp;                             \
+        __asm__ __volatile__(                                   \
+        "mrs    %0, cpsr                @ local_irq_disable\n"  \
+"       orr     %0, %0, #128\n"                                 \
+"       msr     cpsr_c, %0"                                     \
+        : "=r" (temp)                                           \
+        :                                                       \
+        : "memory", "cc");                                      \
+        })
+
+/*
+ * Enable FIQs
+ */
+#define local_fiq_enable()                                      \
+        ({                                                      \
+                unsigned long temp;                             \
+        __asm__ __volatile__(                                   \
+        "mrs    %0, cpsr                @ stf\n"                \
+"       bic     %0, %0, #64\n"                                  \
+"       msr     cpsr_c, %0"                                     \
+        : "=r" (temp)                                           \
+        :                                                       \
+        : "memory", "cc");                                      \
+        })
+
+/*
+ * Disable FIQs
+ */
+#define local_fiq_disable()                                     \
+        ({                                                      \
+                unsigned long temp;                             \
+        __asm__ __volatile__(                                   \
+        "mrs    %0, cpsr                @ clf\n"                \
+"       orr     %0, %0, #64\n"                                  \
+"       msr     cpsr_c, %0"                                     \
+        : "=r" (temp)                                           \
+        :                                                       \
+        : "memory", "cc");                                      \
+        })
+
+/*
+ * Save the current interrupt enable state.
+ */
+#define local_save_flags(x)                                     \
+        ({                                                      \
+        __asm__ __volatile__(                                   \
+        "mrs    %0, cpsr                @ local_save_flags"     \
+        : "=r" (x) : : "memory", "cc");                         \
+        })
+
+/*
+ * restore saved IRQ & FIQ state
+ */
+#define local_irq_restore(x)                                    \
+        __asm__ __volatile__(                                   \
+        "msr    cpsr_c, %0              @ local_irq_restore\n"  \
+        :                                                       \
+        : "r" (x)                                               \
+        : "memory", "cc")
+
+#define irqs_disabled()                 \
+({                                      \
+        unsigned long flags;            \
+        local_save_flags(flags);        \
+        (int)(flags & PSR_I_BIT);       \
+})
+
+#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
+
+#endif
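The local_irq_save()/local_irq_restore() pair nests correctly because the old CPSR lives in the caller's flags variable, so restoring re-enables IRQs only if they were enabled on entry. The canonical critical-section pattern, as a sketch; shared_count and bump_shared_count are illustrative, not part of this commit:

#include <asm/system.h>

/* Stand-in for any state touched by both mainline code and IRQ handlers. */
static unsigned int shared_count;

static void bump_shared_count(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* mask IRQs, remember old state */
        shared_count++;                 /* safe from IRQ handlers here */
        local_irq_restore(flags);       /* re-enable only if previously on */
}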