libatomic: Fix up libat_{,un}lock_n for mingw [PR119796]
Here is just a port of the previously posted patch to mingw, which clearly has the same problems.

2025-04-16  Jakub Jelinek  <jakub@redhat.com>

	PR libgcc/101075
	PR libgcc/119796
	* config/mingw/lock.c (libat_lock_n, libat_unlock_n): Start with
	computing how many locks will be needed and take into account
	((uintptr_t)ptr % WATCH_SIZE).  If some locks from the end of the
	locks array and others from the start of it will be needed, first
	lock the ones from the start followed by ones from the end.
parent 61dfb0747a
commit 34fe8e9000

1 changed file with 32 additions and 18 deletions
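To make the index arithmetic in the patch easier to follow, here is a rough standalone sketch of the lock-range computation it introduces. WATCH_SIZE, NLOCKS and hash_ptr below are simplified stand-ins chosen for the example, not libatomic's real definitions; the point is only to show how the number of needed locks is rounded up from the pointer's offset within a watched block, and how a range that wraps past the end of the lock array is taken low indices first, then the tail starting at the hash bucket.

/* Illustrative sketch only; stand-in constants, not the libatomic code.  */
#include <stdint.h>
#include <stdio.h>

#define WATCH_SIZE 64   /* bytes guarded by one lock (stand-in value) */
#define NLOCKS     16   /* number of lock buckets (stand-in value) */

/* Stand-in for libatomic's addr_hash: bucket of the first watched block.  */
static size_t
hash_ptr (const void *ptr)
{
  return ((uintptr_t) ptr / WATCH_SIZE) % NLOCKS;
}

/* Print the bucket indices an n-byte access starting at ptr would lock,
   in the order the patched libat_lock_n takes them: buckets that wrap
   around to the start of the array first, then the tail ones.  */
static void
show_lock_order (const void *ptr, size_t n)
{
  size_t h = hash_ptr (ptr);
  /* Round up to whole watched blocks, counting the partial first block.  */
  size_t nlocks
    = (n + ((uintptr_t) ptr % WATCH_SIZE) + WATCH_SIZE - 1) / WATCH_SIZE;
  if (nlocks > NLOCKS)
    nlocks = NLOCKS;            /* never take more locks than exist */

  size_t i = 0;
  if (h + nlocks > NLOCKS)      /* the range wraps past the last bucket */
    for (size_t j = h + nlocks - NLOCKS; i < j; ++i)
      printf ("lock %zu\n", i); /* low indices first...  */
  for (; i < nlocks; ++i, ++h)
    printf ("lock %zu\n", h);   /* ...then h up to NLOCKS - 1 */
}

int
main (void)
{
  /* A 200-byte access starting 40 bytes into bucket 14 of 16.  */
  show_lock_order ((const void *) (uintptr_t) (14 * WATCH_SIZE + 40), 200);
  return 0;
}

With the sample access in main, the sketch prints locks 0, 1, 14, 15: the wrapped buckets at the start of the array are acquired before the tail ones, matching the ordering described in the ChangeLog entry above.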
libatomic/config/mingw/lock.c

@@ -87,21 +87,30 @@ libat_lock_n (void *ptr, size_t n)
 {
   uintptr_t h = addr_hash (ptr);
   size_t i = 0;
+  size_t nlocks
+    = (n + ((uintptr_t)ptr % WATCH_SIZE) + WATCH_SIZE - 1) / WATCH_SIZE;
 
   /* Don't lock more than all the locks we have.  */
-  if (n > PAGE_SIZE)
-    n = PAGE_SIZE;
+  if (nlocks > NLOCKS)
+    nlocks = NLOCKS;
 
-  do
+  if (__builtin_expect (h + nlocks > NLOCKS, 0))
     {
+      size_t j = h + nlocks - NLOCKS;
+      for (; i < j; ++i)
+        {
+          if (!locks[i].mutex)
+            locks[i].mutex = CreateMutex (NULL, FALSE, NULL);
+          WaitForSingleObject (locks[i].mutex, INFINITE);
+        }
+    }
+
+  for (; i < nlocks; ++i)
+    {
       if (!locks[h].mutex)
-        locks[h].mutex = CreateMutex (NULL, FALSE, NULL);
-      WaitForSingleObject (locks[h].mutex, INFINITE);
-      if (++h == NLOCKS)
-        h = 0;
-      i += WATCH_SIZE;
+        locks[h].mutex = CreateMutex (NULL, FALSE, NULL);
+      WaitForSingleObject (locks[h++].mutex, INFINITE);
     }
-  while (i < n);
 }
 
 void
@@ -109,17 +118,22 @@ libat_unlock_n (void *ptr, size_t n)
 {
   uintptr_t h = addr_hash (ptr);
   size_t i = 0;
+  size_t nlocks
+    = (n + ((uintptr_t)ptr % WATCH_SIZE) + WATCH_SIZE - 1) / WATCH_SIZE;
 
-  if (n > PAGE_SIZE)
-    n = PAGE_SIZE;
+  /* Don't lock more than all the locks we have.  */
+  if (nlocks > NLOCKS)
+    nlocks = NLOCKS;
 
-  do
+  if (__builtin_expect (h + nlocks > NLOCKS, 0))
     {
-      if (locks[h].mutex)
-        ReleaseMutex (locks[h].mutex);
-      if (++h == NLOCKS)
-        h = 0;
-      i += WATCH_SIZE;
+      size_t j = h + nlocks - NLOCKS;
+      for (; i < j; ++i)
+        if (locks[i].mutex)
+          ReleaseMutex (locks[i].mutex);
     }
-  while (i < n);
+
+  for (; i < nlocks; ++i, ++h)
+    if (locks[h].mutex)
+      ReleaseMutex (locks[h].mutex);
 }