From 8b7f0493df968f57cb75e03105c9411b5680ef12 Mon Sep 17 00:00:00 2001 From: Niels Breet Date: Tue, 24 Sep 2019 15:17:32 +0300 Subject: [PATCH] [Security] Various security fixes. Fixes JB#46726 Fixes CVE-2019-9169 CVE-2019-7309 CVE-2016-10739 CVE-2018-19591 --- git-updates.diff | 1033 +++++++++++++++++++++++++++++++++++++++++++++- glibc.changes | 5 + glibc.spec | 2 +- 3 files changed, 1034 insertions(+), 6 deletions(-) diff --git a/git-updates.diff b/git-updates.diff index 3930e2e..034b6ae 100644 --- a/git-updates.diff +++ b/git-updates.diff @@ -1,10 +1,32 @@ GIT update of https://sourceware.org/git/glibc.git/release/2.28/master from glibc-2.28 diff --git a/ChangeLog b/ChangeLog -index 08b42bd2f5..5667d9262b 100644 +index 08b42bd2f5..63917cc15d 100644 --- a/ChangeLog +++ b/ChangeLog -@@ -1,3 +1,800 @@ +@@ -1,3 +1,822 @@ ++2019-04-23 Adhemerval Zanella ++ ++ [BZ #18035] ++ * elf/pldd-xx.c: Use _Static_assert in of pldd_assert. ++ (E(find_maps)): Avoid use alloca, use default read file operations ++ instead of explicit LFS names, and fix infinite loop. ++ * elf/pldd.c: Explicit set _FILE_OFFSET_BITS, cleanup headers. ++ (get_process_info): Use _Static_assert instead of assert, use default ++ directory operations instead of explicit LFS names, and free some ++ leadek pointers. ++ ++2019-04-03 TAMUKI Shoichi ++ ++ [BZ #22964] ++ * localedata/locales/ja_JP (LC_TIME): Add entry for the new Japanese ++ era. ++ ++2019-03-21 Stefan Liebler ++ ++ * sysdeps/s390/dl-procinfo.h (HWCAP_IMPORTANT): ++ Add HWCAP_S390_VX and HWCAP_S390_VXE. ++ +2019-01-31 Paul Eggert + + CVE-2019-9169 @@ -823,15 +845,19 @@ index 608ffe648c..f5e81bdf5d 100644 # We might want to compile with some stack-protection flag. ifneq ($(stack-protector),) diff --git a/NEWS b/NEWS -index 154ab22d7c..f4981a16f0 100644 +index 154ab22d7c..9c2c37652f 100644 --- a/NEWS +++ b/NEWS -@@ -5,6 +5,82 @@ See the end for copying conditions. +@@ -5,6 +5,87 @@ See the end for copying conditions. Please send GNU C library bug reports via using `glibc' in the "product" field. +Version 2.28.1 + ++Major new features: ++ ++* The entry for the new Japanese era has been added for ja_JP locale. 
++ +Deprecated and removed features, and other changes affecting compatibility: + +* For powercp64le ABI, Transactional Lock Elision is now enabled iff kernel @@ -845,6 +871,7 @@ index 154ab22d7c..f4981a16f0 100644 + +The following bugs are resolved with this release: + ++ [18035] Fix pldd hang + [19444] build failures with -O1 due to -Wmaybe-uninitialized + [20018] getaddrinfo should reject IP addresses with trailing characters + [20209] localedata: Spelling mistake for Sunday in Greenlandic kl_GL @@ -909,7 +936,7 @@ index 154ab22d7c..f4981a16f0 100644 Version 2.28 Major new features: -@@ -422,6 +498,8 @@ The following bugs are resolved with this release: +@@ -422,6 +503,8 @@ The following bugs are resolved with this release: [23459] libc: COMMON_CPUID_INDEX_80000001 isn't populated for Intel processors [23467] dynamic-link: x86/CET: A property note parser bug @@ -1060,6 +1087,388 @@ index 63bbc89776..3d2f4a7a76 100644 ElfW(Sym) *defsym = ((ElfW(Sym) *) D_PTR (reloc_result->bound, l_info[DT_SYMTAB]) + reloc_result->boundndx); +diff --git a/elf/pldd-xx.c b/elf/pldd-xx.c +index 2823dea662..f818d98582 100644 +--- a/elf/pldd-xx.c ++++ b/elf/pldd-xx.c +@@ -23,10 +23,6 @@ + #define EW_(e, w, t) EW__(e, w, _##t) + #define EW__(e, w, t) e##w##t + +-#define pldd_assert(name, exp) \ +- typedef int __assert_##name[((exp) != 0) - 1] +- +- + struct E(link_map) + { + EW(Addr) l_addr; +@@ -39,12 +35,12 @@ struct E(link_map) + EW(Addr) l_libname; + }; + #if CLASS == __ELF_NATIVE_CLASS +-pldd_assert (l_addr, (offsetof (struct link_map, l_addr) +- == offsetof (struct E(link_map), l_addr))); +-pldd_assert (l_name, (offsetof (struct link_map, l_name) +- == offsetof (struct E(link_map), l_name))); +-pldd_assert (l_next, (offsetof (struct link_map, l_next) +- == offsetof (struct E(link_map), l_next))); ++_Static_assert (offsetof (struct link_map, l_addr) ++ == offsetof (struct E(link_map), l_addr), "l_addr"); ++_Static_assert (offsetof (struct link_map, l_name) ++ == offsetof (struct E(link_map), l_name), "l_name"); ++_Static_assert (offsetof (struct link_map, l_next) ++ == offsetof (struct E(link_map), l_next), "l_next"); + #endif + + +@@ -54,10 +50,10 @@ struct E(libname_list) + EW(Addr) next; + }; + #if CLASS == __ELF_NATIVE_CLASS +-pldd_assert (name, (offsetof (struct libname_list, name) +- == offsetof (struct E(libname_list), name))); +-pldd_assert (next, (offsetof (struct libname_list, next) +- == offsetof (struct E(libname_list), next))); ++_Static_assert (offsetof (struct libname_list, name) ++ == offsetof (struct E(libname_list), name), "name"); ++_Static_assert (offsetof (struct libname_list, next) ++ == offsetof (struct E(libname_list), next), "next"); + #endif + + struct E(r_debug) +@@ -69,16 +65,17 @@ struct E(r_debug) + EW(Addr) r_map; + }; + #if CLASS == __ELF_NATIVE_CLASS +-pldd_assert (r_version, (offsetof (struct r_debug, r_version) +- == offsetof (struct E(r_debug), r_version))); +-pldd_assert (r_map, (offsetof (struct r_debug, r_map) +- == offsetof (struct E(r_debug), r_map))); ++_Static_assert (offsetof (struct r_debug, r_version) ++ == offsetof (struct E(r_debug), r_version), "r_version"); ++_Static_assert (offsetof (struct r_debug, r_map) ++ == offsetof (struct E(r_debug), r_map), "r_map"); + #endif + + + static int + +-E(find_maps) (pid_t pid, void *auxv, size_t auxv_size) ++E(find_maps) (const char *exe, int memfd, pid_t pid, void *auxv, ++ size_t auxv_size) + { + EW(Addr) phdr = 0; + unsigned int phnum = 0; +@@ -104,12 +101,9 @@ E(find_maps) (pid_t pid, void *auxv, size_t auxv_size) + 
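++ /* Note: pldd.c, which includes this file, now defines
++    _FILE_OFFSET_BITS to 64, so the plain pread used below is the
++    64-bit variant and the explicit LFS name pread64 is not needed.  */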
if (phdr == 0 || phnum == 0 || phent == 0) + error (EXIT_FAILURE, 0, gettext ("cannot find program header of process")); + +- EW(Phdr) *p = alloca (phnum * phent); +- if (pread64 (memfd, p, phnum * phent, phdr) != phnum * phent) +- { +- error (0, 0, gettext ("cannot read program header")); +- return EXIT_FAILURE; +- } ++ EW(Phdr) *p = xmalloc (phnum * phent); ++ if (pread (memfd, p, phnum * phent, phdr) != phnum * phent) ++ error (EXIT_FAILURE, 0, gettext ("cannot read program header")); + + /* Determine the load offset. We need this for interpreting the + other program header entries so we do this in a separate loop. +@@ -129,24 +123,18 @@ E(find_maps) (pid_t pid, void *auxv, size_t auxv_size) + if (p[i].p_type == PT_DYNAMIC) + { + EW(Dyn) *dyn = xmalloc (p[i].p_filesz); +- if (pread64 (memfd, dyn, p[i].p_filesz, offset + p[i].p_vaddr) ++ if (pread (memfd, dyn, p[i].p_filesz, offset + p[i].p_vaddr) + != p[i].p_filesz) +- { +- error (0, 0, gettext ("cannot read dynamic section")); +- return EXIT_FAILURE; +- } ++ error (EXIT_FAILURE, 0, gettext ("cannot read dynamic section")); + + /* Search for the DT_DEBUG entry. */ + for (unsigned int j = 0; j < p[i].p_filesz / sizeof (EW(Dyn)); ++j) + if (dyn[j].d_tag == DT_DEBUG && dyn[j].d_un.d_ptr != 0) + { + struct E(r_debug) r; +- if (pread64 (memfd, &r, sizeof (r), dyn[j].d_un.d_ptr) ++ if (pread (memfd, &r, sizeof (r), dyn[j].d_un.d_ptr) + != sizeof (r)) +- { +- error (0, 0, gettext ("cannot read r_debug")); +- return EXIT_FAILURE; +- } ++ error (EXIT_FAILURE, 0, gettext ("cannot read r_debug")); + + if (r.r_map != 0) + { +@@ -160,13 +148,10 @@ E(find_maps) (pid_t pid, void *auxv, size_t auxv_size) + } + else if (p[i].p_type == PT_INTERP) + { +- interp = alloca (p[i].p_filesz); +- if (pread64 (memfd, interp, p[i].p_filesz, offset + p[i].p_vaddr) ++ interp = xmalloc (p[i].p_filesz); ++ if (pread (memfd, interp, p[i].p_filesz, offset + p[i].p_vaddr) + != p[i].p_filesz) +- { +- error (0, 0, gettext ("cannot read program interpreter")); +- return EXIT_FAILURE; +- } ++ error (EXIT_FAILURE, 0, gettext ("cannot read program interpreter")); + } + + if (list == 0) +@@ -174,14 +159,16 @@ E(find_maps) (pid_t pid, void *auxv, size_t auxv_size) + if (interp == NULL) + { + // XXX check whether the executable itself is the loader +- return EXIT_FAILURE; ++ exit (EXIT_FAILURE); + } + + // XXX perhaps try finding ld.so and _r_debug in it +- +- return EXIT_FAILURE; ++ exit (EXIT_FAILURE); + } + ++ free (p); ++ free (interp); ++ + /* Print the PID and program name first. 
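+      For example, inspecting PID 1234 whose /proc/1234/exe link
+      resolves to /bin/sh prints the header line "1234:\t/bin/sh".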
*/ + printf ("%lu:\t%s\n", (unsigned long int) pid, exe); + +@@ -192,47 +179,27 @@ E(find_maps) (pid_t pid, void *auxv, size_t auxv_size) + do + { + struct E(link_map) m; +- if (pread64 (memfd, &m, sizeof (m), list) != sizeof (m)) +- { +- error (0, 0, gettext ("cannot read link map")); +- status = EXIT_FAILURE; +- goto out; +- } ++ if (pread (memfd, &m, sizeof (m), list) != sizeof (m)) ++ error (EXIT_FAILURE, 0, gettext ("cannot read link map")); + + EW(Addr) name_offset = m.l_name; +- again: + while (1) + { +- ssize_t n = pread64 (memfd, tmpbuf.data, tmpbuf.length, name_offset); ++ ssize_t n = pread (memfd, tmpbuf.data, tmpbuf.length, name_offset); + if (n == -1) +- { +- error (0, 0, gettext ("cannot read object name")); +- status = EXIT_FAILURE; +- goto out; +- } ++ error (EXIT_FAILURE, 0, gettext ("cannot read object name")); + + if (memchr (tmpbuf.data, '\0', n) != NULL) + break; + + if (!scratch_buffer_grow (&tmpbuf)) +- { +- error (0, 0, gettext ("cannot allocate buffer for object name")); +- status = EXIT_FAILURE; +- goto out; +- } ++ error (EXIT_FAILURE, 0, ++ gettext ("cannot allocate buffer for object name")); + } + +- if (((char *)tmpbuf.data)[0] == '\0' && name_offset == m.l_name +- && m.l_libname != 0) +- { +- /* Try the l_libname element. */ +- struct E(libname_list) ln; +- if (pread64 (memfd, &ln, sizeof (ln), m.l_libname) == sizeof (ln)) +- { +- name_offset = ln.name; +- goto again; +- } +- } ++ /* The m.l_name and m.l_libname.name for loader linkmap points to same ++ values (since BZ#387 fix). Trying to use l_libname name as the ++ shared object name might lead to an infinite loop (BZ#18035). */ + + /* Skip over the executable. */ + if (((char *)tmpbuf.data)[0] != '\0') +@@ -242,7 +209,6 @@ E(find_maps) (pid_t pid, void *auxv, size_t auxv_size) + } + while (list != 0); + +- out: + scratch_buffer_free (&tmpbuf); + return status; + } +diff --git a/elf/pldd.c b/elf/pldd.c +index b8106fdc33..0bdfff450a 100644 +--- a/elf/pldd.c ++++ b/elf/pldd.c +@@ -17,23 +17,17 @@ + License along with the GNU C Library; if not, see + . */ + +-#include ++#define _FILE_OFFSET_BITS 64 ++ + #include +-#include + #include +-#include +-#include + #include + #include + #include +-#include +-#include + #include + #include +-#include + #include + #include +-#include + #include + #include + +@@ -76,14 +70,9 @@ static struct argp argp = + options, parse_opt, args_doc, doc, NULL, more_help, NULL + }; + +-// File descriptor of /proc/*/mem file. +-static int memfd; +- +-/* Name of the executable */ +-static char *exe; + + /* Local functions. 
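+    Both operate on the traced process; get_process_info now also
+    takes the resolved executable name and the descriptor of the
+    /proc/<pid> directory.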
*/ +-static int get_process_info (int dfd, long int pid); ++static int get_process_info (const char *exe, int dfd, long int pid); + static void wait_for_ptrace_stop (long int pid); + + +@@ -102,8 +91,10 @@ main (int argc, char *argv[]) + return 1; + } + +- assert (sizeof (pid_t) == sizeof (int) +- || sizeof (pid_t) == sizeof (long int)); ++ _Static_assert (sizeof (pid_t) == sizeof (int) ++ || sizeof (pid_t) == sizeof (long int), ++ "sizeof (pid_t) != sizeof (int) or sizeof (long int)"); ++ + char *endp; + errno = 0; + long int pid = strtol (argv[remaining], &endp, 10); +@@ -119,25 +110,24 @@ main (int argc, char *argv[]) + if (dfd == -1) + error (EXIT_FAILURE, errno, gettext ("cannot open %s"), buf); + +- struct scratch_buffer exebuf; +- scratch_buffer_init (&exebuf); ++ /* Name of the executable */ ++ struct scratch_buffer exe; ++ scratch_buffer_init (&exe); + ssize_t nexe; + while ((nexe = readlinkat (dfd, "exe", +- exebuf.data, exebuf.length)) == exebuf.length) ++ exe.data, exe.length)) == exe.length) + { +- if (!scratch_buffer_grow (&exebuf)) ++ if (!scratch_buffer_grow (&exe)) + { + nexe = -1; + break; + } + } + if (nexe == -1) +- exe = (char *) ""; ++ /* Default stack allocation is at least 1024. */ ++ snprintf (exe.data, exe.length, ""); + else +- { +- exe = exebuf.data; +- exe[nexe] = '\0'; +- } ++ ((char*)exe.data)[nexe] = '\0'; + + /* Stop all threads since otherwise the list of loaded modules might + change while we are reading it. */ +@@ -155,8 +145,8 @@ main (int argc, char *argv[]) + error (EXIT_FAILURE, errno, gettext ("cannot prepare reading %s/task"), + buf); + +- struct dirent64 *d; +- while ((d = readdir64 (dir)) != NULL) ++ struct dirent *d; ++ while ((d = readdir (dir)) != NULL) + { + if (! isdigit (d->d_name[0])) + continue; +@@ -182,7 +172,7 @@ main (int argc, char *argv[]) + + wait_for_ptrace_stop (tid); + +- struct thread_list *newp = alloca (sizeof (*newp)); ++ struct thread_list *newp = xmalloc (sizeof (*newp)); + newp->tid = tid; + newp->next = thread_list; + thread_list = newp; +@@ -190,17 +180,22 @@ main (int argc, char *argv[]) + + closedir (dir); + +- int status = get_process_info (dfd, pid); ++ if (thread_list == NULL) ++ error (EXIT_FAILURE, 0, gettext ("no valid %s/task entries"), buf); ++ ++ int status = get_process_info (exe.data, dfd, pid); + +- assert (thread_list != NULL); + do + { + ptrace (PTRACE_DETACH, thread_list->tid, NULL, NULL); ++ struct thread_list *prev = thread_list; + thread_list = thread_list->next; ++ free (prev); + } + while (thread_list != NULL); + + close (dfd); ++ scratch_buffer_free (&exe); + + return status; + } +@@ -281,9 +276,10 @@ warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n\ + + + static int +-get_process_info (int dfd, long int pid) ++get_process_info (const char *exe, int dfd, long int pid) + { +- memfd = openat (dfd, "mem", O_RDONLY); ++ /* File descriptor of /proc//mem file. 
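++     (That is, /proc/<pid>/mem, opened with openat relative to the
++     already-open /proc/<pid> directory descriptor dfd.)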
*/ ++ int memfd = openat (dfd, "mem", O_RDONLY); + if (memfd == -1) + goto no_info; + +@@ -333,9 +329,9 @@ get_process_info (int dfd, long int pid) + + int retval; + if (e_ident[EI_CLASS] == ELFCLASS32) +- retval = find_maps32 (pid, auxv, auxv_size); ++ retval = find_maps32 (exe, memfd, pid, auxv, auxv_size); + else +- retval = find_maps64 (pid, auxv, auxv_size); ++ retval = find_maps64 (exe, memfd, pid, auxv, auxv_size); + + free (auxv); + close (memfd); diff --git a/elf/tst-unwind-main.c b/elf/tst-unwind-main.c new file mode 100644 index 0000000000..7c20f04743 @@ -1303,6 +1712,21 @@ index 9322ef68da..63f5227760 100644 +#define TIMEOUT 100 #define PREPARE prepare #include +diff --git a/localedata/locales/ja_JP b/localedata/locales/ja_JP +index 1fd2fee44b..30190b6248 100644 +--- a/localedata/locales/ja_JP ++++ b/localedata/locales/ja_JP +@@ -14946,7 +14946,9 @@ am_pm "";"" + + t_fmt_ampm "%p%I%M%S" + +-era "+:2:1990//01//01:+*::%EC%Ey";/ ++era "+:2:2020//01//01:+*::%EC%Ey";/ ++ "+:1:2019//05//01:2019//12//31::%EC";/ ++ "+:2:1990//01//01:2019//04//30::%EC%Ey";/ + "+:1:1989//01//08:1989//12//31::%EC";/ + "+:2:1927//01//01:1989//01//07::%EC%Ey";/ + "+:1:1926//12//25:1926//12//31::%EC";/ diff --git a/localedata/locales/kl_GL b/localedata/locales/kl_GL index 5ab14a31aa..5723ce7dcf 100644 --- a/localedata/locales/kl_GL @@ -2060,6 +2484,57 @@ index be8066524c..5be7655529 100644 # The tests here better do not run in parallel ifneq ($(filter %tests,$(MAKECMDGOALS)),) .NOTPARALLEL: +diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h +index 13bdb11133..19efe1e35f 100644 +--- a/nptl/pthreadP.h ++++ b/nptl/pthreadP.h +@@ -110,19 +110,23 @@ enum + }; + #define PTHREAD_MUTEX_PSHARED_BIT 128 + ++/* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ + #define PTHREAD_MUTEX_TYPE(m) \ +- ((m)->__data.__kind & 127) ++ (atomic_load_relaxed (&((m)->__data.__kind)) & 127) + /* Don't include NO_ELISION, as that type is always the same + as the underlying lock type. */ + #define PTHREAD_MUTEX_TYPE_ELISION(m) \ +- ((m)->__data.__kind & (127|PTHREAD_MUTEX_ELISION_NP)) ++ (atomic_load_relaxed (&((m)->__data.__kind)) \ ++ & (127 | PTHREAD_MUTEX_ELISION_NP)) + + #if LLL_PRIVATE == 0 && LLL_SHARED == 128 + # define PTHREAD_MUTEX_PSHARED(m) \ +- ((m)->__data.__kind & 128) ++ (atomic_load_relaxed (&((m)->__data.__kind)) & 128) + #else + # define PTHREAD_MUTEX_PSHARED(m) \ +- (((m)->__data.__kind & 128) ? LLL_SHARED : LLL_PRIVATE) ++ ((atomic_load_relaxed (&((m)->__data.__kind)) & 128) \ ++ ? LLL_SHARED : LLL_PRIVATE) + #endif + + /* The kernel when waking robust mutexes on exit never uses +diff --git a/nptl/pthread_cond_common.c b/nptl/pthread_cond_common.c +index 8e425eb01e..479e54febb 100644 +--- a/nptl/pthread_cond_common.c ++++ b/nptl/pthread_cond_common.c +@@ -405,8 +405,12 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, + { + /* There is still a waiter after spinning. Set the wake-request + flag and block. Relaxed MO is fine because this is just about +- this futex word. */ +- r = atomic_fetch_or_relaxed (cond->__data.__g_refs + g1, 1); ++ this futex word. ++ ++ Update r to include the set wake-request flag so that the upcoming ++ futex_wait only blocks if the flag is still set (otherwise, we'd ++ violate the basic client-side futex protocol). 
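++         For example, with one waiter and the flag still clear the word
++         holds 2; the fetch-or stores 3 and returns 2, so r becomes 3 and
++         the futex_wait below blocks only while the word still reads 3.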
*/ ++ r = atomic_fetch_or_relaxed (cond->__data.__g_refs + g1, 1) | 1; + + if ((r >> 1) > 0) + futex_wait_simple (cond->__data.__g_refs + g1, r, private); diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c index 3e11054182..ebf07ca82d 100644 --- a/nptl/pthread_cond_wait.c @@ -2073,6 +2548,540 @@ index 3e11054182..ebf07ca82d 100644 /* Convert the absolute timeout value to a relative timeout. */ rt.tv_sec = abstime->tv_sec - rt.tv_sec; +diff --git a/nptl/pthread_mutex_consistent.c b/nptl/pthread_mutex_consistent.c +index 85b8e1a6cb..4fbd875430 100644 +--- a/nptl/pthread_mutex_consistent.c ++++ b/nptl/pthread_mutex_consistent.c +@@ -23,8 +23,11 @@ + int + pthread_mutex_consistent (pthread_mutex_t *mutex) + { +- /* Test whether this is a robust mutex with a dead owner. */ +- if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0 ++ /* Test whether this is a robust mutex with a dead owner. ++ See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ if ((atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0 + || mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT) + return EINVAL; + +diff --git a/nptl/pthread_mutex_destroy.c b/nptl/pthread_mutex_destroy.c +index 5a22611541..713ea68496 100644 +--- a/nptl/pthread_mutex_destroy.c ++++ b/nptl/pthread_mutex_destroy.c +@@ -27,12 +27,17 @@ __pthread_mutex_destroy (pthread_mutex_t *mutex) + { + LIBC_PROBE (mutex_destroy, 1, mutex); + +- if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0 ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ if ((atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0 + && mutex->__data.__nusers != 0) + return EBUSY; + +- /* Set to an invalid value. */ +- mutex->__data.__kind = -1; ++ /* Set to an invalid value. Relaxed MO is enough as it is undefined behavior ++ if the mutex is used after it has been destroyed. But you can reinitialize ++ it with pthread_mutex_init. */ ++ atomic_store_relaxed (&(mutex->__data.__kind), -1); + + return 0; + } +diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c +index d8fe473728..5cf290c272 100644 +--- a/nptl/pthread_mutex_init.c ++++ b/nptl/pthread_mutex_init.c +@@ -101,7 +101,7 @@ __pthread_mutex_init (pthread_mutex_t *mutex, + memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T); + + /* Copy the values from the attribute. 
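+      The kind is accumulated in a local variable first and published
+      to __kind with a single relaxed atomic store once complete.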
*/ +- mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS; ++ int mutex_kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS; + + if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0) + { +@@ -111,17 +111,17 @@ __pthread_mutex_init (pthread_mutex_t *mutex, + return ENOTSUP; + #endif + +- mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP; ++ mutex_kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP; + } + + switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK) + { + case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT: +- mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP; ++ mutex_kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP; + break; + + case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT: +- mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP; ++ mutex_kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP; + + int ceiling = (imutexattr->mutexkind + & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK) +@@ -145,7 +145,11 @@ __pthread_mutex_init (pthread_mutex_t *mutex, + FUTEX_PRIVATE_FLAG FUTEX_WAKE. */ + if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED + | PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0) +- mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT; ++ mutex_kind |= PTHREAD_MUTEX_PSHARED_BIT; ++ ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ atomic_store_relaxed (&(mutex->__data.__kind), mutex_kind); + + /* Default values: mutex not used yet. */ + // mutex->__count = 0; already done by memset +diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c +index 1519c142bd..29cc143e6c 100644 +--- a/nptl/pthread_mutex_lock.c ++++ b/nptl/pthread_mutex_lock.c +@@ -62,6 +62,8 @@ static int __pthread_mutex_lock_full (pthread_mutex_t *mutex) + int + __pthread_mutex_lock (pthread_mutex_t *mutex) + { ++ /* See concurrency notes regarding mutex type which is loaded from __kind ++ in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */ + unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex); + + LIBC_PROBE (mutex_entry, 1, mutex); +@@ -350,8 +352,14 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex) + case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP: + case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: + { +- int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; +- int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; ++ int kind, robust; ++ { ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind)); ++ kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP; ++ robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; ++ } + + if (robust) + { +@@ -502,7 +510,10 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex) + case PTHREAD_MUTEX_PP_NORMAL_NP: + case PTHREAD_MUTEX_PP_ADAPTIVE_NP: + { +- int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. 
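++           In short, a relaxed load is sufficient here because the bits
++           selected by PTHREAD_MUTEX_KIND_MASK_NP are not changed again
++           while the mutex is in use.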
*/ ++ int kind = atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_KIND_MASK_NP; + + oldval = mutex->__data.__lock; + +@@ -607,15 +618,18 @@ hidden_def (__pthread_mutex_lock) + void + __pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex) + { +- assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0); +- assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0); +- assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0); ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind)); ++ assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0); ++ assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0); ++ assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0); + + /* Record the ownership. */ + pid_t id = THREAD_GETMEM (THREAD_SELF, tid); + mutex->__data.__owner = id; + +- if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP) ++ if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP) + ++mutex->__data.__count; + } + #endif +diff --git a/nptl/pthread_mutex_setprioceiling.c b/nptl/pthread_mutex_setprioceiling.c +index 8594874f85..8306cabcf4 100644 +--- a/nptl/pthread_mutex_setprioceiling.c ++++ b/nptl/pthread_mutex_setprioceiling.c +@@ -27,9 +27,10 @@ int + pthread_mutex_setprioceiling (pthread_mutex_t *mutex, int prioceiling, + int *old_ceiling) + { +- /* The low bits of __kind aren't ever changed after pthread_mutex_init, +- so we don't need a lock yet. */ +- if ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0) ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ if ((atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0) + return EINVAL; + + /* See __init_sched_fifo_prio. */ +diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c +index 28237b0e58..888c12fe28 100644 +--- a/nptl/pthread_mutex_timedlock.c ++++ b/nptl/pthread_mutex_timedlock.c +@@ -53,6 +53,8 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex, + /* We must not check ABSTIME here. If the thread does not block + abstime must not be checked for a valid value. */ + ++ /* See concurrency notes regarding mutex type which is loaded from __kind ++ in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */ + switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex), + PTHREAD_MUTEX_TIMED_NP)) + { +@@ -338,8 +340,14 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex, + case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP: + case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: + { +- int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; +- int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; ++ int kind, robust; ++ { ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind)); ++ kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP; ++ robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; ++ } + + if (robust) + { +@@ -509,7 +517,10 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex, + case PTHREAD_MUTEX_PP_NORMAL_NP: + case PTHREAD_MUTEX_PP_ADAPTIVE_NP: + { +- int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. 
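++           As in __pthread_mutex_lock, the masked kind is stable while
++           the mutex is usable, so a single relaxed load suffices.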
*/ ++ int kind = atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_KIND_MASK_NP; + + oldval = mutex->__data.__lock; + +diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c +index 7de61f4f68..8e01113b0f 100644 +--- a/nptl/pthread_mutex_trylock.c ++++ b/nptl/pthread_mutex_trylock.c +@@ -36,6 +36,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + int oldval; + pid_t id = THREAD_GETMEM (THREAD_SELF, tid); + ++ /* See concurrency notes regarding mutex type which is loaded from __kind ++ in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */ + switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex), + PTHREAD_MUTEX_TIMED_NP)) + { +@@ -92,6 +94,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP: + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, + &mutex->__data.__list.__next); ++ /* We need to set op_pending before starting the operation. Also ++ see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); + + oldval = mutex->__data.__lock; + do +@@ -117,7 +122,12 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + /* But it is inconsistent unless marked otherwise. */ + mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; + ++ /* We must not enqueue the mutex before we have acquired it. ++ Also see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); + ENQUEUE_MUTEX (mutex); ++ /* We need to clear op_pending after we enqueue the mutex. */ ++ __asm ("" ::: "memory"); + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + + /* Note that we deliberately exist here. If we fall +@@ -133,6 +143,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + int kind = PTHREAD_MUTEX_TYPE (mutex); + if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP) + { ++ /* We do not need to ensure ordering wrt another memory ++ access. Also see comments at ENQUEUE_MUTEX. */ + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, + NULL); + return EDEADLK; +@@ -140,6 +152,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + + if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP) + { ++ /* We do not need to ensure ordering wrt another memory ++ access. */ + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, + NULL); + +@@ -158,6 +172,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + id, 0); + if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0) + { ++ /* We haven't acquired the lock as it is already acquired by ++ another owner. We do not need to ensure ordering wrt another ++ memory access. */ + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + + return EBUSY; +@@ -171,13 +188,20 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + if (oldval == id) + lll_unlock (mutex->__data.__lock, + PTHREAD_ROBUST_MUTEX_PSHARED (mutex)); ++ /* FIXME This violates the mutex destruction requirements. See ++ __pthread_mutex_unlock_full. */ + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + return ENOTRECOVERABLE; + } + } + while ((oldval & FUTEX_OWNER_DIED) != 0); + ++ /* We must not enqueue the mutex before we have acquired it. ++ Also see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); + ENQUEUE_MUTEX (mutex); ++ /* We need to clear op_pending after we enqueue the mutex. 
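++         The compiler barrier below keeps this clearing store from being
++         reordered before the list insertion; a crash in between must
++         still leave the mutex visible to the kernel's robust-list
++         handling, either via op_pending or via the list itself.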
*/ ++ __asm ("" ::: "memory"); + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + + mutex->__data.__owner = id; +@@ -199,14 +223,25 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP: + case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: + { +- int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; +- int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; ++ int kind, robust; ++ { ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind)); ++ kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP; ++ robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; ++ } + + if (robust) +- /* Note: robust PI futexes are signaled by setting bit 0. */ +- THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, +- (void *) (((uintptr_t) &mutex->__data.__list.__next) +- | 1)); ++ { ++ /* Note: robust PI futexes are signaled by setting bit 0. */ ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, ++ (void *) (((uintptr_t) &mutex->__data.__list.__next) ++ | 1)); ++ /* We need to set op_pending before starting the operation. Also ++ see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); ++ } + + oldval = mutex->__data.__lock; + +@@ -215,12 +250,16 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + { + if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) + { ++ /* We do not need to ensure ordering wrt another memory ++ access. */ + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + return EDEADLK; + } + + if (kind == PTHREAD_MUTEX_RECURSIVE_NP) + { ++ /* We do not need to ensure ordering wrt another memory ++ access. */ + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + + /* Just bump the counter. */ +@@ -242,6 +281,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + { + if ((oldval & FUTEX_OWNER_DIED) == 0) + { ++ /* We haven't acquired the lock as it is already acquired by ++ another owner. We do not need to ensure ordering wrt another ++ memory access. */ + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + + return EBUSY; +@@ -262,6 +304,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + if (INTERNAL_SYSCALL_ERROR_P (e, __err) + && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK) + { ++ /* The kernel has not yet finished the mutex owner death. ++ We do not need to ensure ordering wrt another memory ++ access. */ + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + + return EBUSY; +@@ -279,7 +324,12 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + /* But it is inconsistent unless marked otherwise. */ + mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; + ++ /* We must not enqueue the mutex before we have acquired it. ++ Also see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); + ENQUEUE_MUTEX (mutex); ++ /* We need to clear op_pending after we enqueue the mutex. */ ++ __asm ("" ::: "memory"); + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + + /* Note that we deliberately exit here. If we fall +@@ -302,13 +352,20 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + PTHREAD_ROBUST_MUTEX_PSHARED (mutex)), + 0, 0); + ++ /* To the kernel, this will be visible after the kernel has ++ acquired the mutex in the syscall. */ + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + return ENOTRECOVERABLE; + } + + if (robust) + { ++ /* We must not enqueue the mutex before we have acquired it. 
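++             Otherwise a crash in the window in between could leave a
++             mutex on our robust list that this thread never held.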
++ Also see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); + ENQUEUE_MUTEX_PI (mutex); ++ /* We need to clear op_pending after we enqueue the mutex. */ ++ __asm ("" ::: "memory"); + THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); + } + +@@ -325,7 +382,10 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex) + case PTHREAD_MUTEX_PP_NORMAL_NP: + case PTHREAD_MUTEX_PP_ADAPTIVE_NP: + { +- int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP; ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ int kind = atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_KIND_MASK_NP; + + oldval = mutex->__data.__lock; + +diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c +index 9ea62943b7..68d04d5395 100644 +--- a/nptl/pthread_mutex_unlock.c ++++ b/nptl/pthread_mutex_unlock.c +@@ -35,6 +35,8 @@ int + attribute_hidden + __pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr) + { ++ /* See concurrency notes regarding mutex type which is loaded from __kind ++ in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */ + int type = PTHREAD_MUTEX_TYPE_ELISION (mutex); + if (__builtin_expect (type & + ~(PTHREAD_MUTEX_KIND_MASK_NP|PTHREAD_MUTEX_ELISION_FLAGS_NP), 0)) +@@ -222,13 +224,19 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr) + /* If the previous owner died and the caller did not succeed in + making the state consistent, mark the mutex as unrecoverable + and make all waiters. */ +- if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0 ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ if ((atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0 + && __builtin_expect (mutex->__data.__owner + == PTHREAD_MUTEX_INCONSISTENT, 0)) + pi_notrecoverable: + newowner = PTHREAD_MUTEX_NOTRECOVERABLE; + +- if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0) ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ if ((atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0) + { + continue_pi_robust: + /* Remove mutex from the list. +@@ -251,7 +259,10 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr) + /* Unlock. Load all necessary mutex data before releasing the mutex + to not violate the mutex destruction requirements (see + lll_unlock). */ +- int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ int robust = atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_ROBUST_NORMAL_NP; + private = (robust + ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex) + : PTHREAD_MUTEX_PSHARED (mutex)); +diff --git a/nptl/pthread_rwlock_common.c b/nptl/pthread_rwlock_common.c +index a290d08332..9ce36d1026 100644 +--- a/nptl/pthread_rwlock_common.c ++++ b/nptl/pthread_rwlock_common.c +@@ -314,8 +314,8 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock, + harmless because the flag is just about the state of + __readers, and all threads set the flag under the same + conditions. 
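+         Storing the freshly observed value into r also gives the
+         futex_abstimed_wait below an expected value matching what this
+         iteration saw, so the wait cannot block on a stale word.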
*/ +- while ((atomic_load_relaxed (&rwlock->__data.__readers) +- & PTHREAD_RWLOCK_RWAITING) != 0) ++ while (((r = atomic_load_relaxed (&rwlock->__data.__readers)) ++ & PTHREAD_RWLOCK_RWAITING) != 0) + { + int private = __pthread_rwlock_get_private (rwlock); + int err = futex_abstimed_wait (&rwlock->__data.__readers, +diff --git a/nptl/pthread_rwlock_tryrdlock.c b/nptl/pthread_rwlock_tryrdlock.c +index 4aec1fc15a..31a88d33a6 100644 +--- a/nptl/pthread_rwlock_tryrdlock.c ++++ b/nptl/pthread_rwlock_tryrdlock.c +@@ -94,15 +94,22 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) + /* Same as in __pthread_rwlock_rdlock_full: + We started the read phase, so we are also responsible for + updating the write-phase futex. Relaxed MO is sufficient. +- Note that there can be no other reader that we have to wake +- because all other readers will see the read phase started by us +- (or they will try to start it themselves); if a writer started +- the read phase, we cannot have started it. Furthermore, we +- cannot discard a PTHREAD_RWLOCK_FUTEX_USED flag because we will +- overwrite the value set by the most recent writer (or the readers +- before it in case of explicit hand-over) and we know that there +- are no waiting readers. */ +- atomic_store_relaxed (&rwlock->__data.__wrphase_futex, 0); ++ We have to do the same steps as a writer would when handing over the ++ read phase to use because other readers cannot distinguish between ++ us and the writer. ++ Note that __pthread_rwlock_tryrdlock callers will not have to be ++ woken up because they will either see the read phase started by us ++ or they will try to start it themselves; however, callers of ++ __pthread_rwlock_rdlock_full just increase the reader count and then ++ check what state the lock is in, so they cannot distinguish between ++ us and a writer that acquired and released the lock in the ++ meantime. */ ++ if ((atomic_exchange_relaxed (&rwlock->__data.__wrphase_futex, 0) ++ & PTHREAD_RWLOCK_FUTEX_USED) != 0) ++ { ++ int private = __pthread_rwlock_get_private (rwlock); ++ futex_wake (&rwlock->__data.__wrphase_futex, INT_MAX, private); ++ } + } + + return 0; +diff --git a/nptl/pthread_rwlock_trywrlock.c b/nptl/pthread_rwlock_trywrlock.c +index 5a73eba756..f2e3443466 100644 +--- a/nptl/pthread_rwlock_trywrlock.c ++++ b/nptl/pthread_rwlock_trywrlock.c +@@ -46,8 +46,15 @@ __pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) + &rwlock->__data.__readers, &r, + r | PTHREAD_RWLOCK_WRPHASE | PTHREAD_RWLOCK_WRLOCKED)) + { ++ /* We have become the primary writer and we cannot have shared ++ the PTHREAD_RWLOCK_FUTEX_USED flag with someone else, so we ++ can simply enable blocking (see full wrlock code). */ + atomic_store_relaxed (&rwlock->__data.__writers_futex, 1); +- atomic_store_relaxed (&rwlock->__data.__wrphase_futex, 1); ++ /* If we started a write phase, we need to enable readers to ++ wait. If we did not, we must not change it because other threads ++ may have set the PTHREAD_RWLOCK_FUTEX_USED in the meantime. 
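++             Unconditionally storing 1 could discard that flag, and a
++             blocked waiter would then never receive the futex_wake it
++             is waiting for.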
*/ ++ if ((r & PTHREAD_RWLOCK_WRPHASE) == 0) ++ atomic_store_relaxed (&rwlock->__data.__wrphase_futex, 1); + atomic_store_relaxed (&rwlock->__data.__cur_writer, + THREAD_GETMEM (THREAD_SELF, tid)); + return 0; diff --git a/nptl/register-atfork.c b/nptl/register-atfork.c index 5ff1c1be8c..9edb7d4bbb 100644 --- a/nptl/register-atfork.c @@ -7378,6 +8387,20 @@ index d8ba7ba427..ecb24f0a9b 100644 } #endif /* dl-irel.h */ +diff --git a/sysdeps/s390/dl-procinfo.h b/sysdeps/s390/dl-procinfo.h +index b0383bfb4c..f71d64c3ab 100644 +--- a/sysdeps/s390/dl-procinfo.h ++++ b/sysdeps/s390/dl-procinfo.h +@@ -57,7 +57,8 @@ enum + }; + + #define HWCAP_IMPORTANT (HWCAP_S390_ZARCH | HWCAP_S390_LDISP \ +- | HWCAP_S390_EIMM | HWCAP_S390_DFP) ++ | HWCAP_S390_EIMM | HWCAP_S390_DFP \ ++ | HWCAP_S390_VX | HWCAP_S390_VXE) + + /* We cannot provide a general printing function. */ + #define _dl_procinfo(type, word) -1 diff --git a/sysdeps/sparc/sparc32/dl-irel.h b/sysdeps/sparc/sparc32/dl-irel.h index ffca36864f..cf47cda834 100644 --- a/sysdeps/sparc/sparc32/dl-irel.h diff --git a/glibc.changes b/glibc.changes index d87f666..5ec8193 100644 --- a/glibc.changes +++ b/glibc.changes @@ -1,3 +1,8 @@ +* Fri Aug 09 2019 Niels Breet - 2.28+git6 +- Various security fixes. Fixes JB#46726 +- Fixes CVE-2019-9169, CVE-2019-7309, CVE-2016-10739, CVE-2018-19591 + MER#2047 + * Fri Aug 09 2019 Niels Breet - 2.28+git5 - Get rid of quilt build-requires. Contributes to JB#46706 diff --git a/glibc.spec b/glibc.spec index 0797184..3b85ef8 100644 --- a/glibc.spec +++ b/glibc.spec @@ -5,7 +5,7 @@ Name: glibc Summary: GNU C library shared libraries -Version: 2.28+git5 +Version: 2.28+git6 Release: 0 License: LGPLv2+ and LGPLv2+ with exceptions and GPLv2+ Group: System/Libraries