From 8ee9fac21ec877d5e0bbcd3460eb584bbeccf836 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Wed, 9 Nov 2011 11:51:49 -0800
Subject: [PATCH] libitm: Avoid non-portable x86 branch prediction mnemonic.

From-SVN: r181233
---
 libitm/ChangeLog              | 3 +++
 libitm/config/x86/cacheline.h | 8 +++++---
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/libitm/ChangeLog b/libitm/ChangeLog
index e78716d2a37..0501d168a23 100644
--- a/libitm/ChangeLog
+++ b/libitm/ChangeLog
@@ -1,5 +1,8 @@
 2011-11-09  Richard Henderson
 
+	* config/x86/cacheline.h (gtm_cacheline::store_mask): Use .byte
+	to emit branch prediction hint.
+
 	* config/x86/sjlj.S: Protect elf directives with __ELF__.
 	Protect .note.GNU-stack with __linux__.
 
diff --git a/libitm/config/x86/cacheline.h b/libitm/config/x86/cacheline.h
index 15a95b0be5b..f91d7ccb802 100644
--- a/libitm/config/x86/cacheline.h
+++ b/libitm/config/x86/cacheline.h
@@ -144,7 +144,7 @@ gtm_cacheline::operator= (const gtm_cacheline & __restrict s)
 }
 #endif
 
-// ??? Support masked integer stores more efficiently with an unlocked cmpxchg
+// Support masked integer stores more efficiently with an unlocked cmpxchg
 // insn.  My reasoning is that while we write to locations that we do not wish
 // to modify, we do it in an uninterruptable insn, and so we either truely
 // write back the original data or the insn fails -- unlike with a
@@ -171,7 +171,8 @@ gtm_cacheline::store_mask (uint32_t *d, uint32_t s, uint8_t m)
 	     "and	%[m], %[n]\n\t"
 	     "or	%[s], %[n]\n\t"
 	     "cmpxchg	%[n], %[d]\n\t"
-	     "jnz,pn	0b"
+	     ".byte	0x2e\n\t" // predict not-taken, aka jnz,pn
+	     "jnz	0b"
 	     : [d] "+m"(*d), [n] "=&r" (n), [o] "+a"(o)
 	     : [s] "r" (s & bm), [m] "r" (~bm));
 }
@@ -198,7 +199,8 @@ gtm_cacheline::store_mask (uint64_t *d, uint64_t s, uint8_t m)
 	     "and	%[m], %[n]\n\t"
 	     "or	%[s], %[n]\n\t"
 	     "cmpxchg	%[n], %[d]\n\t"
-	     "jnz,pn	0b"
+	     ".byte	0x2e\n\t" // predict not-taken, aka jnz,pn
+	     "jnz	0b"
 	     : [d] "+m"(*d), [n] "=&r" (n), [o] "+a"(o)
 	     : [s] "r" (s & bm), [m] "r" (~bm));
 #else
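
Note for reviewers: the 0x2e byte is the CS segment-override prefix, which
NetBurst-era Intel CPUs interpret as a static "branch not taken" hint when it
precedes a conditional jump; "jnz,pn" is the Sun assembler spelling of the
same hint, which GNU as does not accept on all targets, hence emitting the
prefix byte directly.  The standalone sketch below is illustrative only and
not part of the patch -- the function name store_mask_sketch and the driver
are made up -- but it shows the same retry loop compiling with plain GNU as
syntax (GCC or Clang on x86/x86-64 assumed):

/* Illustrative sketch only.  Masked 32-bit store via an unlocked cmpxchg,
   with the branch-prediction prefix emitted as a raw byte.  */
#include <stdint.h>
#include <stdio.h>

static void
store_mask_sketch (uint32_t *d, uint32_t s, uint32_t bm)
{
  uint32_t n, o = *d;			/* o is tied to %eax for cmpxchg */
  __asm__ volatile ("\n0:\t"
		    "mov	%[o], %[n]\n\t"		/* n = last seen value */
		    "and	%[m], %[n]\n\t"		/* keep bits outside mask */
		    "or		%[s], %[n]\n\t"		/* merge masked source bits */
		    "cmpxchg	%[n], %[d]\n\t"		/* store n if *d still == o */
		    ".byte	0x2e\n\t"		/* CS prefix: predict not-taken */
		    "jnz	0b"			/* retry if *d changed under us */
		    : [d] "+m"(*d), [n] "=&r"(n), [o] "+a"(o)
		    : [s] "r"(s & bm), [m] "r"(~bm));
}

int
main (void)
{
  uint32_t word = 0xaaaaaaaa;
  store_mask_sketch (&word, 0x00000055, 0x000000ff);	/* replace low byte */
  printf ("%08x\n", word);				/* prints aaaaaa55 */
  return 0;
}

On a cmpxchg failure %eax is reloaded with the current *d, so the loop
recomputes n from fresh data; the backward branch is taken only under
contention, which is why hinting it not-taken matches the common case.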