diff src/gmalloc.c @ 91005:424b655804ca

Merge from emacs--devo--0

Patches applied:

 * emacs--devo--0  (patch 846-851)
   - Update from CVS
   - Merge from emacs--rel--22

 * emacs--rel--22  (patch 88-92)
   - Update from CVS
   - Merge from gnus--rel--5.10

 * gnus--rel--5.10  (patch 242-244)
   - Update from CVS

Revision: emacs@sv.gnu.org/emacs--unicode--0--patch-246
author Miles Bader <miles@gnu.org>
date Mon, 13 Aug 2007 13:48:35 +0000
parents 539530fa389c 310b4cdcc703
--- a/src/gmalloc.c	Wed Aug 08 16:39:00 2007 +0000
+++ b/src/gmalloc.c	Mon Aug 13 13:48:35 2007 +0000
@@ -129,6 +129,8 @@
 #if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
 extern __ptr_t memalign PP ((__malloc_size_t __alignment,
 			     __malloc_size_t __size));
+extern int posix_memalign PP ((__ptr_t *, __malloc_size_t,
+			       __malloc_size_t size));
 #endif
 
 /* Allocate SIZE bytes on a page boundary.  */
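A minimal usage sketch (my illustration, not part of the patch) of the interface declared above: unlike memalign, posix_memalign reports failure through its return value rather than errno, and stores the result through its first argument. On a POSIX system the prototype is also available from <stdlib.h> when _POSIX_C_SOURCE >= 200112L.

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  void *buf;
  int err = posix_memalign (&buf, 64, 1024);	/* 64-byte aligned, 1 KB */

  if (err != 0)
    {
      fprintf (stderr, "posix_memalign: error %d\n", err);
      return 1;
    }
  free (buf);
  return 0;
}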
@@ -136,6 +138,10 @@
 extern __ptr_t valloc PP ((__malloc_size_t __size));
 #endif
 
+#ifdef USE_PTHREAD
+/* Set up mutexes and make malloc etc. thread-safe.  */
+extern void malloc_enable_thread PP ((void));
+#endif
 
 #ifdef _MALLOC_INTERNAL
 
@@ -242,10 +248,27 @@
 
 #ifdef USE_PTHREAD
 extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
-#define LOCK()     pthread_mutex_lock (&_malloc_mutex)
-#define UNLOCK()   pthread_mutex_unlock (&_malloc_mutex)
-#define LOCK_ALIGNED_BLOCKS()     pthread_mutex_lock (&_aligned_blocks_mutex)
-#define UNLOCK_ALIGNED_BLOCKS()   pthread_mutex_unlock (&_aligned_blocks_mutex)
+extern int _malloc_thread_enabled_p;
+#define LOCK()					\
+  do {						\
+    if (_malloc_thread_enabled_p)		\
+      pthread_mutex_lock (&_malloc_mutex);	\
+  } while (0)
+#define UNLOCK()				\
+  do {						\
+    if (_malloc_thread_enabled_p)		\
+      pthread_mutex_unlock (&_malloc_mutex);	\
+  } while (0)
+#define LOCK_ALIGNED_BLOCKS()				\
+  do {							\
+    if (_malloc_thread_enabled_p)			\
+      pthread_mutex_lock (&_aligned_blocks_mutex);	\
+  } while (0)
+#define UNLOCK_ALIGNED_BLOCKS()				\
+  do {							\
+    if (_malloc_thread_enabled_p)			\
+      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
+  } while (0)
 #else
 #define LOCK()
 #define UNLOCK()
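The same pattern in isolation (a sketch, not part of the patch): the mutex is touched only after the flag has been set, so single-threaded startup never enters pthread code, and the do { ... } while (0) wrapper keeps each macro a single statement so an else in the caller cannot bind to the if inside the macro.

#include <pthread.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
static int demo_threads_enabled;	/* set once, before threads start */

#define DEMO_LOCK()				\
  do {						\
    if (demo_threads_enabled)			\
      pthread_mutex_lock (&demo_mutex);		\
  } while (0)

#define DEMO_UNLOCK()				\
  do {						\
    if (demo_threads_enabled)			\
      pthread_mutex_unlock (&demo_mutex);	\
  } while (0)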
@@ -563,6 +586,47 @@
 #ifdef USE_PTHREAD
 pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
 pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
+int _malloc_thread_enabled_p;
+
+static void
+malloc_atfork_handler_prepare ()
+{
+  LOCK ();
+  LOCK_ALIGNED_BLOCKS ();
+}
+
+static void
+malloc_atfork_handler_parent ()
+{
+  UNLOCK_ALIGNED_BLOCKS ();
+  UNLOCK ();
+}
+
+static void
+malloc_atfork_handler_child ()
+{
+  UNLOCK_ALIGNED_BLOCKS ();
+  UNLOCK ();
+}
+
+/* Set up mutexes and make malloc etc. thread-safe.  */
+void
+malloc_enable_thread ()
+{
+  if (_malloc_thread_enabled_p)
+    return;
+
+  /* Some pthread implementations call malloc for statically
+     initialized mutexes when they are used first.  To avoid such a
+     situation, we initialize mutexes here while their use is
+     disabled in malloc etc.  */
+  pthread_mutex_init (&_malloc_mutex, NULL);
+  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
+  pthread_atfork (malloc_atfork_handler_prepare,
+		  malloc_atfork_handler_parent,
+		  malloc_atfork_handler_child);
+  _malloc_thread_enabled_p = 1;
+}
 #endif
 
 static void
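A standalone sketch (my illustration, not part of the patch) of the fork-safety pattern registered above: the prepare handler acquires the allocator locks, so at the moment of fork no other thread holds them; the parent and child handlers each release their own copy, leaving the child's allocator usable even though only the forking thread survives in the child.

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void prepare (void) { pthread_mutex_lock (&lock); }
static void parent (void)  { pthread_mutex_unlock (&lock); }
static void child (void)   { pthread_mutex_unlock (&lock); }

int
main (void)
{
  pthread_atfork (prepare, parent, child);
  if (fork () == 0)
    {
      /* Safe: the lock inherited by the child is not held.  */
      pthread_mutex_lock (&lock);
      pthread_mutex_unlock (&lock);
      _exit (0);
    }
  return 0;
}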
@@ -575,19 +639,6 @@
   if (__malloc_initialize_hook)
     (*__malloc_initialize_hook) ();
 
-  /* We don't use recursive mutex because pthread_mutexattr_init may
-     call malloc internally.  */
-#if 0 /* defined (USE_PTHREAD) */
-  {
-    pthread_mutexattr_t attr;
-
-    pthread_mutexattr_init (&attr);
-    pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
-    pthread_mutex_init (&_malloc_mutex, &attr);
-    pthread_mutexattr_destroy (&attr);
-  }
-#endif
-
   heapsize = HEAP / BLOCKSIZE;
   _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
   if (_heapinfo == NULL)
@@ -1808,6 +1859,36 @@
   return result;
 }
 
+#ifndef ENOMEM
+#define ENOMEM 12
+#endif
+
+#ifndef EINVAL
+#define EINVAL 22
+#endif
+
+int
+posix_memalign (memptr, alignment, size)
+     __ptr_t *memptr;
+     __malloc_size_t alignment;
+     __malloc_size_t size;
+{
+  __ptr_t mem;
+
+  if (alignment == 0
+      || alignment % sizeof (__ptr_t) != 0
+      || (alignment & (alignment - 1)) != 0)
+    return EINVAL;
+
+  mem = memalign (alignment, size);
+  if (mem == NULL)
+    return ENOMEM;
+
+  *memptr = mem;
+
+  return 0;
+}
+
 #endif /* Not DJGPP v1 */
 /* Allocate memory on a page boundary.
    Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
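A small check (a sketch, not part of the patch) exercising the error conventions of the posix_memalign implementation added above: an alignment that is zero, not a power of two, or not a multiple of sizeof (void *) yields EINVAL; allocation failure yields ENOMEM; success returns 0 and stores the pointer through memptr.

#define _POSIX_C_SOURCE 200112L
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

int
main (void)
{
  void *p = NULL;

  assert (posix_memalign (&p, 3, 128) == EINVAL);	/* not a power of two */
  assert (posix_memalign (&p, 2, 128) == EINVAL);	/* not a multiple of
							   sizeof (void *) */
  assert (posix_memalign (&p, 4096, 128) == 0);		/* page-sized alignment */
  assert ((uintptr_t) p % 4096 == 0);
  free (p);
  return 0;
}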