From 529dec15d809c6bff8031344f2075de98872395d Mon Sep 17 00:00:00 2001
From: Richard Earnshaw
Date: Tue, 5 Dec 2000 10:36:33 +0000
Subject: [PATCH] atomicity.h: Add support for compiling Thumb code.

	* config/cpu/arm/bits/atomicity.h: Add support for compiling Thumb
	code.

From-SVN: r38033
---
 libstdc++-v3/ChangeLog                       |   5 +
 libstdc++-v3/config/cpu/arm/bits/atomicity.h | 183 ++++++++++++++++---
 2 files changed, 165 insertions(+), 23 deletions(-)

diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 5e2a9a33118..927ae79307e 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,8 @@
+2000-12-05  Richard Earnshaw
+
+	* config/cpu/arm/bits/atomicity.h: Add support for compiling Thumb
+	code.
+
 2000-12-05  Richard Earnshaw
 
 	Support for NetBSD.
diff --git a/libstdc++-v3/config/cpu/arm/bits/atomicity.h b/libstdc++-v3/config/cpu/arm/bits/atomicity.h
index 32987757b12..e933ca4688c 100644
--- a/libstdc++-v3/config/cpu/arm/bits/atomicity.h
+++ b/libstdc++-v3/config/cpu/arm/bits/atomicity.h
@@ -27,19 +27,46 @@ __attribute__ ((__unused__))
 __exchange_and_add (volatile _Atomic_word* __mem, int __val)
 {
   _Atomic_word __tmp, __tmp2, __result;
+#ifdef __thumb__
+  /* Since this function is inlined, we can't be sure of the alignment.  */
+  __asm__ __volatile__ (
+	"ldr %0, 4f \n\t"
+	"bx %0 \n\t"
+	".align 0 \n"
+	"4:\t"
+	".word 0f \n\t"
+	".code 32 \n"
+	"0:\t"
+	"ldr %0, [%3] \n\t"
+	"add %1, %0, %4 \n\t"
+	"swp %2, %1, [%3] \n\t"
+	"cmp %0, %2 \n\t"
+	"swpne %1, %2, [%3] \n\t"
+	"bne 0b \n\t"
+	"ldr %1, 1f \n\t"
+	"bx %1 \n"
+	"1:\t"
+	".word 2f \n\t"
+	".code 16 \n"
+	"2:\n"
+	: "=&l"(__result), "=&r"(__tmp), "=&r"(__tmp2)
+	: "r" (__mem), "r"(__val)
+	: "cc", "memory");
+#else
   __asm__ __volatile__ (
 	"\n"
 	"0:\t"
-	"ldr %0,[%3] \n\t"
-	"add %1,%0,%4 \n\t"
-	"swp %2,%1,[%3] \n\t"
-	"cmp %0,%2 \n\t"
-	"swpne %1,%2,[%3] \n\t"
+	"ldr %0, [%3] \n\t"
+	"add %1, %0, %4 \n\t"
+	"swp %2, %1, [%3] \n\t"
+	"cmp %0, %2 \n\t"
+	"swpne %1, %2, [%3] \n\t"
 	"bne 0b \n\t"
 	""
 	: "=&r"(__result), "=&r"(__tmp), "=&r"(__tmp2)
 	: "r" (__mem), "r"(__val)
 	: "cc", "memory");
+#endif
   return __result;
 }
@@ -48,19 +75,46 @@ __attribute__ ((__unused__))
 __atomic_add (volatile _Atomic_word *__mem, int __val)
 {
   _Atomic_word __tmp, __tmp2, __tmp3;
+#ifdef __thumb__
+  /* Since this function is inlined, we can't be sure of the alignment.  */
+  __asm__ __volatile__ (
+	"ldr %0, 4f \n\t"
+	"bx %0 \n\t"
+	".align 0 \n"
+	"4:\t"
+	".word 0f \n\t"
+	".code 32 \n"
+	"0:\t"
+	"ldr %0, [%3] \n\t"
+	"add %1, %0, %4 \n\t"
+	"swp %2, %1, [%3] \n\t"
+	"cmp %0, %2 \n\t"
+	"swpne %1, %2, [%3] \n\t"
+	"bne 0b \n\t"
+	"ldr %1, 1f \n\t"
+	"bx %1 \n"
+	"1:\t"
+	".word 2f \n\t"
+	".code 16 \n"
+	"2:\n"
+	: "=&l"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3)
+	: "r" (__mem), "r"(__val)
+	: "cc", "memory");
+#else
   __asm__ __volatile__ (
 	"\n"
 	"0:\t"
-	"ldr %0,[%3] \n\t"
-	"add %1,%0,%4 \n\t"
-	"swp %2,%1,[%3] \n\t"
-	"cmp %0,%2 \n\t"
-	"swpne %1,%2,[%3] \n\t"
+	"ldr %0, [%3] \n\t"
+	"add %1, %0, %4 \n\t"
+	"swp %2, %1, [%3] \n\t"
+	"cmp %0, %2 \n\t"
+	"swpne %1, %2, [%3] \n\t"
 	"bne 0b \n\t"
 	""
 	: "=&r"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3)
 	: "r" (__mem), "r"(__val)
 	: "cc", "memory");
+#endif
 }
 
 static inline int
@@ -69,23 +123,54 @@ __compare_and_swap (volatile long *__p, long __oldval, long __newval)
 {
   int __result;
   long __tmp;
+#ifdef __thumb__
+  /* Since this function is inlined, we can't be sure of the alignment.  */
+  __asm__ __volatile__ (
+	"ldr %0, 4f \n\t"
+	"bx %0 \n\t"
+	".align 0 \n"
+	"4:\t"
+	".word 0f \n\t"
+	".code 32 \n"
+	"0:\t"
+	"ldr %1, [%2] \n\t"
+	"mov %0, #0 \n\t"
+	"cmp %1, %4 \n\t"
+	"bne 1f \n\t"
+	"swp %0, %3, [%2] \n\t"
+	"cmp %1, %0 \n\t"
+	"swpne %1, %0, [%2] \n\t"
+	"bne 0b \n\t"
+	"mov %0, #1 \n"
+	"1:\t"
+	"ldr %1, 2f \n\t"
+	"bx %1 \n"
+	"2:\t"
+	".word 3f \n\t"
+	".code 16 \n"
+	"3:\n"
+	: "=&l"(__result), "=&r"(__tmp)
+	: "r" (__p), "r" (__newval), "r" (__oldval)
+	: "cc", "memory");
+#else
   __asm__ __volatile__ (
 	"\n"
 	"0:\t"
-	"ldr %1,[%2] \n\t"
-	"mov %0,#0 \n\t"
-	"cmp %1,%4 \n\t"
+	"ldr %1, [%2] \n\t"
+	"mov %0, #0 \n\t"
+	"cmp %1, %4 \n\t"
 	"bne 1f \n\t"
-	"swp %0,%3,[%2] \n\t"
-	"cmp %1,%0 \n\t"
-	"swpne %1,%0,[%2] \n\t"
+	"swp %0, %3, [%2] \n\t"
+	"cmp %1, %0 \n\t"
+	"swpne %1, %0, [%2] \n\t"
 	"bne 0b \n\t"
-	"mov %0,#1 \n"
+	"mov %0, #1 \n"
 	"1:\n\t"
 	""
 	: "=&r"(__result), "=&r"(__tmp)
 	: "r" (__p), "r" (__newval), "r" (__oldval)
 	: "cc", "memory");
+#endif
   return __result;
 }
@@ -94,13 +179,36 @@ __attribute__ ((__unused__))
 __always_swap (volatile long *__p, long __newval)
 {
   long __result;
+#ifdef __thumb__
+  long __tmp;
+  /* Since this function is inlined, we can't be sure of the alignment.  */
+  __asm__ __volatile__ (
+	"ldr %0, 4f \n\t"
+	"bx %0 \n\t"
+	".align 0 \n"
+	"4:\t"
+	".word 0f \n\t"
+	".code 32 \n"
+	"0:\t"
+	"swp %0, %3, [%2] \n\t"
+	"ldr %1, 1f \n\t"
+	"bx %1 \n"
+	"1:\t"
+	".word 2f \n\t"
+	".code 16 \n"
+	"2:\n"
+	: "=&l"(__result), "=&r"(__tmp)
+	: "r"(__p), "r"(__newval)
+	: "memory");
+#else
   __asm__ __volatile__ (
 	"\n\t"
-	"swp %0,%2,[%1] \n\t"
+	"swp %0, %2, [%1] \n\t"
 	""
 	: "=&r"(__result)
 	: "r"(__p), "r"(__newval)
 	: "memory");
+#endif
   return __result;
 }
@@ -110,21 +218,50 @@ __test_and_set (volatile long *__p, long __newval)
 {
   int __result;
   long __tmp;
+#ifdef __thumb__
+  /* Since this function is inlined, we can't be sure of the alignment.  */
+  __asm__ __volatile__ (
+	"ldr %0, 4f \n\t"
+	"bx %0 \n\t"
+	".align 0 \n"
+	"4:\t"
+	".word 0f \n\t"
+	".code 32 \n"
+	"0:\t"
+	"ldr %0, [%2] \n\t"
+	"cmp %0, #0 \n\t"
+	"bne 1f \n\t"
+	"swp %1, %3, [%2] \n\t"
+	"cmp %0, %1 \n\t"
+	"swpne %0, %1, [%2] \n\t"
+	"bne 0b \n"
+	"1:\t"
+	"ldr %1, 2f \n\t"
+	"bx %1 \n"
+	"2:\t"
+	".word 3f \n\t"
+	".code 16 \n"
+	"3:"
+	: "=&l"(__result), "=r" (__tmp)
+	: "r"(__p), "r"(__newval)
+	: "cc", "memory");
+#else
   __asm__ __volatile__ (
 	"\n"
 	"0:\t"
-	"ldr %0,[%2] \n\t"
-	"cmp %0,#0 \n\t"
+	"ldr %0, [%2] \n\t"
+	"cmp %0, #0 \n\t"
 	"bne 1f \n\t"
-	"swp %1,%3,[%2] \n\t"
-	"cmp %0,%1 \n\t"
-	"swpne %0,%1,[%2] \n\t"
+	"swp %1, %3, [%2] \n\t"
+	"cmp %0, %1 \n\t"
+	"swpne %0, %1, [%2] \n\t"
 	"bne 0b \n"
 	"1:\n\t"
 	""
 	: "=&r"(__result), "=r" (__tmp)
 	: "r"(__p), "r"(__newval)
 	: "cc", "memory");
+#endif
   return __result;
 }
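
Note on the retry loop these routines share: ARM's swp instruction is an
unconditional atomic exchange, not a compare-and-swap, so each routine loads
the current value, swaps its candidate in, and, if some other thread wrote to
the word in between, swaps the intruding value back and retries.  The sketch
below models the __exchange_and_add loop in portable C++; it is illustrative
only (the function name and the use of std::atomic as a stand-in for swp are
assumptions for the sketch, not part of the patch).

    // Model of the swp-based fetch-and-add loop; exchange() plays swp.
    #include <atomic>

    static int
    exchange_and_add_model (std::atomic<int> &mem, int val)
    {
      for (;;)
        {
          int old  = mem.load ();           // ldr   %0, [%3]
          int sum  = old + val;             // add   %1, %0, %4
          int seen = mem.exchange (sum);    // swp   %2, %1, [%3]
          if (seen == old)                  // cmp   %0, %2
            return old;                     // done: return the prior value
          // Another thread wrote *mem between the load and the swap;
          // put its value back and go round again.
          mem.exchange (seen);              // swpne %1, %2, [%3]; bne 0b
        }
    }

As a quick check of the contract, exchange_and_add_model (counter, 5) returns
the prior value of counter and leaves counter incremented by 5, which is what
__exchange_and_add promises.  The Thumb variants wrap the same loop in an
ARM-mode trampoline, since swp has no Thumb encoding: they load the address of
a .code 32 block from a literal and bx to it, then bx back to a .code 16 label
at the end.  The "l" constraint keeps the first scratch in a low register, as
the Thumb ldr-literal encoding requires, and the .align directive restores
alignment for the .word literals, since inlined asm cannot assume the
instruction stream is word-aligned (hence the comment in each routine).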