]> git.ipfire.org Git - thirdparty/Python/cpython.git/commitdiff
convert multiprocessing to unix line endings
authorBenjamin Peterson <benjamin@python.org>
Fri, 13 Jun 2008 19:13:39 +0000 (19:13 +0000)
committerBenjamin Peterson <benjamin@python.org>
Fri, 13 Jun 2008 19:13:39 +0000 (19:13 +0000)
19 files changed:
Lib/multiprocessing/__init__.py
Lib/multiprocessing/connection.py
Lib/multiprocessing/dummy/__init__.py
Lib/multiprocessing/dummy/connection.py
Lib/multiprocessing/forking.py
Lib/multiprocessing/heap.py
Lib/multiprocessing/managers.py
Lib/multiprocessing/pool.py
Lib/multiprocessing/process.py
Lib/multiprocessing/queues.py
Lib/multiprocessing/reduction.py
Lib/multiprocessing/sharedctypes.py
Lib/multiprocessing/synchronize.py
Lib/multiprocessing/util.py
Lib/test/test_multiprocessing.py
Modules/_multiprocessing/multiprocessing.c
Modules/_multiprocessing/multiprocessing.h
Modules/_multiprocessing/pipe_connection.c
Modules/_multiprocessing/win32_functions.c

index 1cb3222e300be422a5eebccb18c41b520cd1775d..decb2ada17438ced42d0ebdbe6bb6e35054cc480 100644 (file)
@@ -68,10 +68,10 @@ from multiprocessing.process import Process, current_process, active_children
 \r
 class ProcessError(Exception):\r
     pass\r
-    \r
+\r
 class BufferTooShort(ProcessError):\r
     pass\r
-    \r
+\r
 class TimeoutError(ProcessError):\r
     pass\r
 \r
@@ -123,7 +123,7 @@ def cpu_count():
             num = os.sysconf('SC_NPROCESSORS_ONLN')\r
         except (ValueError, OSError, AttributeError):\r
             num = 0\r
-        \r
+\r
     if num >= 1:\r
         return num\r
     else:\r
@@ -151,13 +151,13 @@ def log_to_stderr(level=None):
     '''\r
     from multiprocessing.util import log_to_stderr\r
     return log_to_stderr(level)\r
-    \r
+\r
 def allow_connection_pickling():\r
     '''\r
     Install support for sending connections and sockets between processes\r
     '''\r
     from multiprocessing import reduction\r
-    \r
+\r
 #\r
 # Definitions depending on native semaphores\r
 #\r
@@ -263,7 +263,7 @@ if sys.platform == 'win32':
         '''\r
         Sets the path to a python.exe or pythonw.exe binary used to run\r
         child processes on Windows instead of sys.executable.\r
-        Useful for people embedding Python. \r
+        Useful for people embedding Python.\r
         '''\r
         from multiprocessing.forking import set_executable\r
         set_executable(executable)\r
index 752d9ab2d9f96cb2daf75632d9537a90bb1f3a2e..f5a3301fbdb11f7a628dbf000ee2330454035c64 100644 (file)
@@ -50,7 +50,7 @@ def arbitrary_address(family):
     '''\r
     if family == 'AF_INET':\r
         return ('localhost', 0)\r
-    elif family == 'AF_UNIX':        \r
+    elif family == 'AF_UNIX':\r
         return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())\r
     elif family == 'AF_PIPE':\r
         return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %\r
@@ -160,7 +160,7 @@ if sys.platform != 'win32':
             c2 = _multiprocessing.Connection(fd2, readable=False)\r
 \r
         return c1, c2\r
-    \r
+\r
 else:\r
 \r
     from ._multiprocessing import win32\r
@@ -200,7 +200,7 @@ else:
 \r
         c1 = _multiprocessing.PipeConnection(h1, writable=duplex)\r
         c2 = _multiprocessing.PipeConnection(h2, readable=duplex)\r
-        \r
+\r
         return c1, c2\r
 \r
 #\r
@@ -290,14 +290,14 @@ if sys.platform == 'win32':
                 )\r
             self._handle_queue = [handle]\r
             self._last_accepted = None\r
-            \r
+\r
             sub_debug('listener created with address=%r', self._address)\r
 \r
             self.close = Finalize(\r
                 self, PipeListener._finalize_pipe_listener,\r
                 args=(self._handle_queue, self._address), exitpriority=0\r
                 )\r
-            \r
+\r
         def accept(self):\r
             newhandle = win32.CreateNamedPipe(\r
                 self._address, win32.PIPE_ACCESS_DUPLEX,\r
@@ -320,7 +320,7 @@ if sys.platform == 'win32':
             sub_debug('closing listener with address=%r', address)\r
             for handle in queue:\r
                 close(handle)\r
-        \r
+\r
     def PipeClient(address):\r
         '''\r
         Return a connection object connected to the pipe given by `address`\r
@@ -397,7 +397,7 @@ class ConnectionWrapper(object):
         self._loads = loads\r
         for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):\r
             obj = getattr(conn, attr)\r
-            setattr(self, attr, obj)            \r
+            setattr(self, attr, obj)\r
     def send(self, obj):\r
         s = self._dumps(obj)\r
         self._conn.send_bytes(s)\r
index cabf580f3d2bd19d797351dedffbbc12310ca255..dd0f07b046b435bc924940e1bf0a48e860a86ef6 100644 (file)
-#\r
-# Support for the API of the multiprocessing package using threads\r
-#\r
-# multiprocessing/dummy/__init__.py\r
-#\r
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
-#\r
-\r
-__all__ = [\r
-    'Process', 'current_process', 'active_children', 'freeze_support',\r
-    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',\r
-    'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'\r
-    ]\r
-\r
-#\r
-# Imports\r
-#\r
-\r
-import threading\r
-import sys\r
-import weakref\r
-import array\r
-import itertools\r
-\r
-from multiprocessing import TimeoutError, cpu_count\r
-from multiprocessing.dummy.connection import Pipe\r
-from threading import Lock, RLock, Semaphore, BoundedSemaphore\r
-from threading import Event\r
-from Queue import Queue\r
-\r
-#\r
-#\r
-#\r
-\r
-class DummyProcess(threading.Thread):\r
-\r
-    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):\r
-        threading.Thread.__init__(self, group, target, name, args, kwargs)\r
-        self._pid = None\r
-        self._children = weakref.WeakKeyDictionary()\r
-        self._start_called = False\r
-        self._parent = current_process()\r
-\r
-    def start(self):\r
-        assert self._parent is current_process()\r
-        self._start_called = True\r
-        self._parent._children[self] = None\r
-        threading.Thread.start(self)\r
-\r
-    def get_exitcode(self):\r
-        if self._start_called and not self.is_alive():\r
-            return 0\r
-        else:\r
-            return None\r
-\r
-    # XXX\r
-    if sys.version_info < (3, 0):\r
-        is_alive = threading.Thread.is_alive.im_func\r
-        get_name = threading.Thread.get_name.im_func\r
-        set_name = threading.Thread.set_name.im_func\r
-        is_daemon = threading.Thread.is_daemon.im_func\r
-        set_daemon = threading.Thread.set_daemon.im_func\r
-    else:\r
-        is_alive = threading.Thread.is_alive\r
-        get_name = threading.Thread.get_name\r
-        set_name = threading.Thread.set_name\r
-        is_daemon = threading.Thread.is_daemon\r
-        set_daemon = threading.Thread.set_daemon\r
-\r
-#\r
-#\r
-#\r
-        \r
-class Condition(threading._Condition):\r
-    # XXX\r
-    if sys.version_info < (3, 0):\r
-        notify_all = threading._Condition.notify_all.im_func\r
-    else:\r
-        notify_all = threading._Condition.notify_all\r
-\r
-#\r
-#\r
-#\r
-\r
-Process = DummyProcess\r
-current_process = threading.current_thread\r
-current_process()._children = weakref.WeakKeyDictionary()\r
-\r
-def active_children():\r
-    children = current_process()._children\r
-    for p in list(children):\r
-        if not p.is_alive():\r
-            children.pop(p, None)\r
-    return list(children)\r
-\r
-def freeze_support():\r
-    pass\r
-\r
-#\r
-#\r
-#\r
-\r
-class Namespace(object):\r
-    def __init__(self, **kwds):\r
-        self.__dict__.update(kwds)\r
-    def __repr__(self):\r
-        items = self.__dict__.items()\r
-        temp = []\r
-        for name, value in items:\r
-            if not name.startswith('_'):\r
-                temp.append('%s=%r' % (name, value))\r
-        temp.sort()\r
-        return 'Namespace(%s)' % str.join(', ', temp)\r
-\r
-dict = dict\r
-list = list\r
-\r
-def Array(typecode, sequence, lock=True):\r
-    return array.array(typecode, sequence)\r
-\r
-class Value(object):\r
-    def __init__(self, typecode, value, lock=True):\r
-        self._typecode = typecode\r
-        self._value = value\r
-    def _get(self):\r
-        return self._value\r
-    def _set(self, value):\r
-        self._value = value\r
-    value = property(_get, _set)\r
-    def __repr__(self):\r
-        return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)\r
-\r
-def Manager():\r
-    return sys.modules[__name__]\r
-\r
-def shutdown():\r
-    pass\r
-\r
-def Pool(processes=None, initializer=None, initargs=()):\r
-    from multiprocessing.pool import ThreadPool\r
-    return ThreadPool(processes, initializer, initargs)\r
-\r
-JoinableQueue = Queue\r
+#
+# Support for the API of the multiprocessing package using threads
+#
+# multiprocessing/dummy/__init__.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [
+    'Process', 'current_process', 'active_children', 'freeze_support',
+    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
+    'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
+    ]
+
+#
+# Imports
+#
+
+import threading
+import sys
+import weakref
+import array
+import itertools
+
+from multiprocessing import TimeoutError, cpu_count
+from multiprocessing.dummy.connection import Pipe
+from threading import Lock, RLock, Semaphore, BoundedSemaphore
+from threading import Event
+from Queue import Queue
+
+#
+#
+#
+
+class DummyProcess(threading.Thread):
+
+    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
+        threading.Thread.__init__(self, group, target, name, args, kwargs)
+        self._pid = None
+        self._children = weakref.WeakKeyDictionary()
+        self._start_called = False
+        self._parent = current_process()
+
+    def start(self):
+        assert self._parent is current_process()
+        self._start_called = True
+        self._parent._children[self] = None
+        threading.Thread.start(self)
+
+    def get_exitcode(self):
+        if self._start_called and not self.is_alive():
+            return 0
+        else:
+            return None
+
+    # XXX
+    if sys.version_info < (3, 0):
+        is_alive = threading.Thread.is_alive.im_func
+        get_name = threading.Thread.get_name.im_func
+        set_name = threading.Thread.set_name.im_func
+        is_daemon = threading.Thread.is_daemon.im_func
+        set_daemon = threading.Thread.set_daemon.im_func
+    else:
+        is_alive = threading.Thread.is_alive
+        get_name = threading.Thread.get_name
+        set_name = threading.Thread.set_name
+        is_daemon = threading.Thread.is_daemon
+        set_daemon = threading.Thread.set_daemon
+
+#
+#
+#
+
+class Condition(threading._Condition):
+    # XXX
+    if sys.version_info < (3, 0):
+        notify_all = threading._Condition.notify_all.im_func
+    else:
+        notify_all = threading._Condition.notify_all
+
+#
+#
+#
+
+Process = DummyProcess
+current_process = threading.current_thread
+current_process()._children = weakref.WeakKeyDictionary()
+
+def active_children():
+    children = current_process()._children
+    for p in list(children):
+        if not p.is_alive():
+            children.pop(p, None)
+    return list(children)
+
+def freeze_support():
+    pass
+
+#
+#
+#
+
+class Namespace(object):
+    def __init__(self, **kwds):
+        self.__dict__.update(kwds)
+    def __repr__(self):
+        items = self.__dict__.items()
+        temp = []
+        for name, value in items:
+            if not name.startswith('_'):
+                temp.append('%s=%r' % (name, value))
+        temp.sort()
+        return 'Namespace(%s)' % str.join(', ', temp)
+
+dict = dict
+list = list
+
+def Array(typecode, sequence, lock=True):
+    return array.array(typecode, sequence)
+
+class Value(object):
+    def __init__(self, typecode, value, lock=True):
+        self._typecode = typecode
+        self._value = value
+    def _get(self):
+        return self._value
+    def _set(self, value):
+        self._value = value
+    value = property(_get, _set)
+    def __repr__(self):
+        return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
+
+def Manager():
+    return sys.modules[__name__]
+
+def shutdown():
+    pass
+
+def Pool(processes=None, initializer=None, initargs=()):
+    from multiprocessing.pool import ThreadPool
+    return ThreadPool(processes, initializer, initargs)
+
+JoinableQueue = Queue
index dd2bcb920267009f38b491273f162521785a6293..4f0a6805de1061b5025bba7af82435489ea22565 100644 (file)
@@ -1,61 +1,61 @@
-#\r
-# Analogue of `multiprocessing.connection` which uses queues instead of sockets\r
-#\r
-# multiprocessing/dummy/connection.py\r
-#\r
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
-#\r
-\r
-__all__ = [ 'Client', 'Listener', 'Pipe' ]\r
-\r
-from Queue import Queue\r
-\r
-\r
-families = [None]\r
-\r
-\r
-class Listener(object):\r
-\r
-    def __init__(self, address=None, family=None, backlog=1):\r
-        self._backlog_queue = Queue(backlog)\r
-\r
-    def accept(self):\r
-        return Connection(*self._backlog_queue.get())\r
-\r
-    def close(self):\r
-        self._backlog_queue = None\r
-\r
-    address = property(lambda self: self._backlog_queue)\r
-\r
-\r
-def Client(address):\r
-    _in, _out = Queue(), Queue()\r
-    address.put((_out, _in))\r
-    return Connection(_in, _out)\r
-\r
-\r
-def Pipe(duplex=True):\r
-    a, b = Queue(), Queue()\r
-    return Connection(a, b), Connection(b, a)\r
-\r
-\r
-class Connection(object):\r
-\r
-    def __init__(self, _in, _out):\r
-        self._out = _out\r
-        self._in = _in\r
-        self.send = self.send_bytes = _out.put\r
-        self.recv = self.recv_bytes = _in.get\r
-\r
-    def poll(self, timeout=0.0):\r
-        if self._in.qsize() > 0:\r
-            return True\r
-        if timeout <= 0.0:\r
-            return False\r
-        self._in.not_empty.acquire()\r
-        self._in.not_empty.wait(timeout)\r
-        self._in.not_empty.release()\r
-        return self._in.qsize() > 0\r
-\r
-    def close(self):\r
-        pass\r
+#
+# Analogue of `multiprocessing.connection` which uses queues instead of sockets
+#
+# multiprocessing/dummy/connection.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [ 'Client', 'Listener', 'Pipe' ]
+
+from Queue import Queue
+
+
+families = [None]
+
+
+class Listener(object):
+
+    def __init__(self, address=None, family=None, backlog=1):
+        self._backlog_queue = Queue(backlog)
+
+    def accept(self):
+        return Connection(*self._backlog_queue.get())
+
+    def close(self):
+        self._backlog_queue = None
+
+    address = property(lambda self: self._backlog_queue)
+
+
+def Client(address):
+    _in, _out = Queue(), Queue()
+    address.put((_out, _in))
+    return Connection(_in, _out)
+
+
+def Pipe(duplex=True):
+    a, b = Queue(), Queue()
+    return Connection(a, b), Connection(b, a)
+
+
+class Connection(object):
+
+    def __init__(self, _in, _out):
+        self._out = _out
+        self._in = _in
+        self.send = self.send_bytes = _out.put
+        self.recv = self.recv_bytes = _in.get
+
+    def poll(self, timeout=0.0):
+        if self._in.qsize() > 0:
+            return True
+        if timeout <= 0.0:
+            return False
+        self._in.not_empty.acquire()
+        self._in.not_empty.wait(timeout)
+        self._in.not_empty.release()
+        return self._in.qsize() > 0
+
+    def close(self):
+        pass
index 2c1d3cf99590623aea2bc02a60aa20cc9b2b3c33..6107f07a41eb45321ab19048c91458661ad6272d 100644 (file)
@@ -92,7 +92,7 @@ if sys.platform != 'win32':
                 except OSError, e:\r
                     if self.wait(timeout=0.1) is None:\r
                         raise\r
-                    \r
+\r
         @staticmethod\r
         def thread_is_spawning():\r
             return False\r
@@ -107,10 +107,10 @@ else:
     import _subprocess\r
     import copy_reg\r
     import time\r
-    \r
+\r
     from ._multiprocessing import win32, Connection, PipeConnection\r
     from .util import Finalize\r
-    \r
+\r
     try:\r
         from cPickle import dump, load, HIGHEST_PROTOCOL\r
     except ImportError:\r
@@ -217,7 +217,7 @@ else:
                     if code == TERMINATE:\r
                         code = -signal.SIGTERM\r
                     self.returncode = code\r
-                    \r
+\r
             return self.returncode\r
 \r
         def poll(self):\r
@@ -230,7 +230,7 @@ else:
                 except WindowsError:\r
                     if self.wait(timeout=0.1) is None:\r
                         raise\r
-        \r
+\r
     #\r
     #\r
     #\r
@@ -308,7 +308,7 @@ else:
         Return info about parent needed by child to unpickle process object\r
         '''\r
         from .util import _logger, _log_to_stderr\r
-        \r
+\r
         d = dict(\r
             name=name,\r
             sys_path=sys.path,\r
@@ -317,7 +317,7 @@ else:
             orig_dir=process.ORIGINAL_DIR,\r
             authkey=process.current_process().get_authkey(),\r
             )\r
-        \r
+\r
         if _logger is not None:\r
             d['log_level'] = _logger.getEffectiveLevel()\r
 \r
@@ -336,7 +336,7 @@ else:
     #\r
     # Make (Pipe)Connection picklable\r
     #\r
-    \r
+\r
     def reduce_connection(conn):\r
         if not Popen.thread_is_spawning():\r
             raise RuntimeError(\r
@@ -345,7 +345,7 @@ else:
                 )\r
         return type(conn), (Popen.duplicate_for_child(conn.fileno()),\r
                             conn.readable, conn.writable)\r
-    \r
+\r
     copy_reg.pickle(Connection, reduce_connection)\r
     copy_reg.pickle(PipeConnection, reduce_connection)\r
 \r
@@ -367,7 +367,7 @@ def prepare(data):
 \r
     if 'authkey' in data:\r
         process.current_process()._authkey = data['authkey']\r
-    \r
+\r
     if 'log_to_stderr' in data and data['log_to_stderr']:\r
         util.log_to_stderr()\r
 \r
index 7e596ca70fa78e67576e38534f265c9b94112ba7..f6b34042f8e6ac9926f908d12c5bf8b7369c6afd 100644 (file)
-#
-# Module which supports allocation of memory from an mmap
-#
-# multiprocessing/heap.py
-#
-# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
-#
-
-import bisect
-import mmap
-import tempfile
-import os
-import sys
-import threading
-import itertools
-
-import _multiprocessing
-from multiprocessing.util import Finalize, info
-from multiprocessing.forking import assert_spawning
-
-__all__ = ['BufferWrapper']
-
-#
-# Inheirtable class which wraps an mmap, and from which blocks can be allocated
-#
-
-if sys.platform == 'win32':
-
-    from ._multiprocessing import win32
-
-    class Arena(object):
-
-        _counter = itertools.count()
-
-        def __init__(self, size):
-            self.size = size
-            self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
-            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
-            assert win32.GetLastError() == 0, 'tagname already in use'
-            self._state = (self.size, self.name)
-
-        def __getstate__(self):
-            assert_spawning(self)
-            return self._state
-
-        def __setstate__(self, state):
-            self.size, self.name = self._state = state
-            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
-            assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
-
-else:
-
-    class Arena(object):
-
-        def __init__(self, size):
-            self.buffer = mmap.mmap(-1, size)
-            self.size = size
-            self.name = None
-
-#
-# Class allowing allocation of chunks of memory from arenas
-#
-
-class Heap(object):
-
-    _alignment = 8
-
-    def __init__(self, size=mmap.PAGESIZE):
-        self._lastpid = os.getpid()
-        self._lock = threading.Lock()
-        self._size = size
-        self._lengths = []
-        self._len_to_seq = {}
-        self._start_to_block = {}
-        self._stop_to_block = {}
-        self._allocated_blocks = set()
-        self._arenas = []
-
-    @staticmethod
-    def _roundup(n, alignment):
-        # alignment must be a power of 2
-        mask = alignment - 1
-        return (n + mask) & ~mask
-
-    def _malloc(self, size):
-        # returns a large enough block -- it might be much larger
-        i = bisect.bisect_left(self._lengths, size)
-        if i == len(self._lengths):
-            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
-            self._size *= 2
-            info('allocating a new mmap of length %d', length)
-            arena = Arena(length)
-            self._arenas.append(arena)
-            return (arena, 0, length)
-        else:
-            length = self._lengths[i]
-            seq = self._len_to_seq[length]
-            block = seq.pop()
-            if not seq:
-                del self._len_to_seq[length], self._lengths[i]
-
-        (arena, start, stop) = block
-        del self._start_to_block[(arena, start)]
-        del self._stop_to_block[(arena, stop)]
-        return block
-
-    def _free(self, block):
-        # free location and try to merge with neighbours
-        (arena, start, stop) = block
-
-        try:
-            prev_block = self._stop_to_block[(arena, start)]
-        except KeyError:
-            pass
-        else:
-            start, _ = self._absorb(prev_block)
-
-        try:
-            next_block = self._start_to_block[(arena, stop)]
-        except KeyError:
-            pass
-        else:
-            _, stop = self._absorb(next_block)
-
-        block = (arena, start, stop)
-        length = stop - start
-
-        try:
-            self._len_to_seq[length].append(block)
-        except KeyError:
-            self._len_to_seq[length] = [block]
-            bisect.insort(self._lengths, length)
-
-        self._start_to_block[(arena, start)] = block
-        self._stop_to_block[(arena, stop)] = block
-
-    def _absorb(self, block):
-        # deregister this block so it can be merged with a neighbour
-        (arena, start, stop) = block
-        del self._start_to_block[(arena, start)]
-        del self._stop_to_block[(arena, stop)]
-
-        length = stop - start
-        seq = self._len_to_seq[length]
-        seq.remove(block)
-        if not seq:
-            del self._len_to_seq[length]
-            self._lengths.remove(length)
-
-        return start, stop
-
-    def free(self, block):
-        # free a block returned by malloc()
-        assert os.getpid() == self._lastpid
-        self._lock.acquire()
-        try:
-            self._allocated_blocks.remove(block)
-            self._free(block)
-        finally:
-            self._lock.release()
-
-    def malloc(self, size):
-        # return a block of right size (possibly rounded up)
-        assert 0 <= size < sys.maxint
-        if os.getpid() != self._lastpid:
-            self.__init__()                     # reinitialize after fork
-        self._lock.acquire()
-        try:
-            size = self._roundup(max(size,1), self._alignment)
-            (arena, start, stop) = self._malloc(size)
-            new_stop = start + size
-            if new_stop < stop:
-                self._free((arena, new_stop, stop))
-            block = (arena, start, new_stop)
-            self._allocated_blocks.add(block)
-            return block
-        finally:
-            self._lock.release()
-
-#
-# Class representing a chunk of an mmap -- can be inherited
-#
-
-class BufferWrapper(object):
-
-    _heap = Heap()
-
-    def __init__(self, size):
-        assert 0 <= size < sys.maxint
-        block = BufferWrapper._heap.malloc(size)
-        self._state = (block, size)
-        Finalize(self, BufferWrapper._heap.free, args=(block,))
-
-    def get_address(self):
-        (arena, start, stop), size = self._state
-        address, length = _multiprocessing.address_of_buffer(arena.buffer)
-        assert size <= length
-        return address + start
-
-    def get_size(self):
-        return self._state[1]
+#\r
+# Module which supports allocation of memory from an mmap\r
+#\r
+# multiprocessing/heap.py\r
+#\r
+# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt\r
+#\r
+\r
+import bisect\r
+import mmap\r
+import tempfile\r
+import os\r
+import sys\r
+import threading\r
+import itertools\r
+\r
+import _multiprocessing\r
+from multiprocessing.util import Finalize, info\r
+from multiprocessing.forking import assert_spawning\r
+\r
+__all__ = ['BufferWrapper']\r
+\r
+#\r
+# Inheirtable class which wraps an mmap, and from which blocks can be allocated\r
+#\r
+\r
+if sys.platform == 'win32':\r
+\r
+    from ._multiprocessing import win32\r
+\r
+    class Arena(object):\r
+\r
+        _counter = itertools.count()\r
+\r
+        def __init__(self, size):\r
+            self.size = size\r
+            self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())\r
+            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)\r
+            assert win32.GetLastError() == 0, 'tagname already in use'\r
+            self._state = (self.size, self.name)\r
+\r
+        def __getstate__(self):\r
+            assert_spawning(self)\r
+            return self._state\r
+\r
+        def __setstate__(self, state):\r
+            self.size, self.name = self._state = state\r
+            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)\r
+            assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS\r
+\r
+else:\r
+\r
+    class Arena(object):\r
+\r
+        def __init__(self, size):\r
+            self.buffer = mmap.mmap(-1, size)\r
+            self.size = size\r
+            self.name = None\r
+\r
+#\r
+# Class allowing allocation of chunks of memory from arenas\r
+#\r
+\r
+class Heap(object):\r
+\r
+    _alignment = 8\r
+\r
+    def __init__(self, size=mmap.PAGESIZE):\r
+        self._lastpid = os.getpid()\r
+        self._lock = threading.Lock()\r
+        self._size = size\r
+        self._lengths = []\r
+        self._len_to_seq = {}\r
+        self._start_to_block = {}\r
+        self._stop_to_block = {}\r
+        self._allocated_blocks = set()\r
+        self._arenas = []\r
+\r
+    @staticmethod\r
+    def _roundup(n, alignment):\r
+        # alignment must be a power of 2\r
+        mask = alignment - 1\r
+        return (n + mask) & ~mask\r
+\r
+    def _malloc(self, size):\r
+        # returns a large enough block -- it might be much larger\r
+        i = bisect.bisect_left(self._lengths, size)\r
+        if i == len(self._lengths):\r
+            length = self._roundup(max(self._size, size), mmap.PAGESIZE)\r
+            self._size *= 2\r
+            info('allocating a new mmap of length %d', length)\r
+            arena = Arena(length)\r
+            self._arenas.append(arena)\r
+            return (arena, 0, length)\r
+        else:\r
+            length = self._lengths[i]\r
+            seq = self._len_to_seq[length]\r
+            block = seq.pop()\r
+            if not seq:\r
+                del self._len_to_seq[length], self._lengths[i]\r
+\r
+        (arena, start, stop) = block\r
+        del self._start_to_block[(arena, start)]\r
+        del self._stop_to_block[(arena, stop)]\r
+        return block\r
+\r
+    def _free(self, block):\r
+        # free location and try to merge with neighbours\r
+        (arena, start, stop) = block\r
+\r
+        try:\r
+            prev_block = self._stop_to_block[(arena, start)]\r
+        except KeyError:\r
+            pass\r
+        else:\r
+            start, _ = self._absorb(prev_block)\r
+\r
+        try:\r
+            next_block = self._start_to_block[(arena, stop)]\r
+        except KeyError:\r
+            pass\r
+        else:\r
+            _, stop = self._absorb(next_block)\r
+\r
+        block = (arena, start, stop)\r
+        length = stop - start\r
+\r
+        try:\r
+            self._len_to_seq[length].append(block)\r
+        except KeyError:\r
+            self._len_to_seq[length] = [block]\r
+            bisect.insort(self._lengths, length)\r
+\r
+        self._start_to_block[(arena, start)] = block\r
+        self._stop_to_block[(arena, stop)] = block\r
+\r
+    def _absorb(self, block):\r
+        # deregister this block so it can be merged with a neighbour\r
+        (arena, start, stop) = block\r
+        del self._start_to_block[(arena, start)]\r
+        del self._stop_to_block[(arena, stop)]\r
+\r
+        length = stop - start\r
+        seq = self._len_to_seq[length]\r
+        seq.remove(block)\r
+        if not seq:\r
+            del self._len_to_seq[length]\r
+            self._lengths.remove(length)\r
+\r
+        return start, stop\r
+\r
+    def free(self, block):\r
+        # free a block returned by malloc()\r
+        assert os.getpid() == self._lastpid\r
+        self._lock.acquire()\r
+        try:\r
+            self._allocated_blocks.remove(block)\r
+            self._free(block)\r
+        finally:\r
+            self._lock.release()\r
+\r
+    def malloc(self, size):\r
+        # return a block of right size (possibly rounded up)\r
+        assert 0 <= size < sys.maxint\r
+        if os.getpid() != self._lastpid:\r
+            self.__init__()                     # reinitialize after fork\r
+        self._lock.acquire()\r
+        try:\r
+            size = self._roundup(max(size,1), self._alignment)\r
+            (arena, start, stop) = self._malloc(size)\r
+            new_stop = start + size\r
+            if new_stop < stop:\r
+                self._free((arena, new_stop, stop))\r
+            block = (arena, start, new_stop)\r
+            self._allocated_blocks.add(block)\r
+            return block\r
+        finally:\r
+            self._lock.release()\r
+\r
+#\r
+# Class representing a chunk of an mmap -- can be inherited\r
+#\r
+\r
+class BufferWrapper(object):\r
+\r
+    _heap = Heap()\r
+\r
+    def __init__(self, size):\r
+        assert 0 <= size < sys.maxint\r
+        block = BufferWrapper._heap.malloc(size)\r
+        self._state = (block, size)\r
+        Finalize(self, BufferWrapper._heap.free, args=(block,))\r
+\r
+    def get_address(self):\r
+        (arena, start, stop), size = self._state\r
+        address, length = _multiprocessing.address_of_buffer(arena.buffer)\r
+        assert size <= length\r
+        return address + start\r
+\r
+    def get_size(self):\r
+        return self._state[1]\r
index 908c193de8c5421d7773f4b56f078e78a2a913b5..6c1d912c3cab31f446de2c3ceb01954772f17937 100644 (file)
@@ -40,7 +40,7 @@ try:
     bytes\r
 except NameError:\r
     bytes = str                  # XXX not needed in Py2.6 and Py3.0\r
-    \r
+\r
 #\r
 # Register some things for pickling\r
 #\r
@@ -55,7 +55,7 @@ if view_types[0] is not list:       # XXX only needed in Py3.0
         return list, (list(obj),)\r
     for view_type in view_types:\r
         copy_reg.pickle(view_type, rebuild_as_list)\r
-    \r
+\r
 #\r
 # Type for identifying shared objects\r
 #\r
@@ -104,7 +104,7 @@ def convert_to_error(kind, result):
         return RemoteError('Unserializable message: %s\n' % result)\r
     else:\r
         return ValueError('Unrecognized message type')\r
-        \r
+\r
 class RemoteError(Exception):\r
     def __str__(self):\r
         return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)\r
@@ -340,7 +340,7 @@ class Server(object):
                     util.debug('resetting stdout, stderr')\r
                     sys.stdout = sys.__stdout__\r
                     sys.stderr = sys.__stderr__\r
-                    \r
+\r
                 util._run_finalizers(0)\r
 \r
                 for p in active_children():\r
@@ -358,7 +358,7 @@ class Server(object):
                 traceback.print_exc()\r
         finally:\r
             exit(0)\r
-            \r
+\r
     def create(self, c, typeid, *args, **kwds):\r
         '''\r
         Create a new shared object and return its id\r
@@ -367,7 +367,7 @@ class Server(object):
         try:\r
             callable, exposed, method_to_typeid, proxytype = \\r
                       self.registry[typeid]\r
-            \r
+\r
             if callable is None:\r
                 assert len(args) == 1 and not kwds\r
                 obj = args[0]\r
@@ -456,7 +456,7 @@ class BaseManager(object):
     '''\r
     _registry = {}\r
     _Server = Server\r
-    \r
+\r
     def __init__(self, address=None, authkey=None, serializer='pickle'):\r
         if authkey is None:\r
             authkey = current_process().get_authkey()\r
@@ -487,7 +487,7 @@ class BaseManager(object):
         conn = Client(self._address, authkey=self._authkey)\r
         dispatch(conn, None, 'dummy')\r
         self._state.value = State.STARTED\r
-        \r
+\r
     def start(self):\r
         '''\r
         Spawn a server process for this manager object\r
@@ -570,10 +570,10 @@ class BaseManager(object):
         Return the number of shared objects\r
         '''\r
         conn = self._Client(self._address, authkey=self._authkey)\r
-        try:        \r
+        try:\r
             return dispatch(conn, None, 'number_of_objects')\r
         finally:\r
-            conn.close()        \r
+            conn.close()\r
 \r
     def __enter__(self):\r
         return self\r
@@ -612,7 +612,7 @@ class BaseManager(object):
             del BaseProxy._address_to_local[address]\r
         except KeyError:\r
             pass\r
-        \r
+\r
     address = property(lambda self: self._address)\r
 \r
     @classmethod\r
@@ -640,7 +640,7 @@ class BaseManager(object):
         cls._registry[typeid] = (\r
             callable, exposed, method_to_typeid, proxytype\r
             )\r
-        \r
+\r
         if create_method:\r
             def temp(self, *args, **kwds):\r
                 util.debug('requesting creation of a shared %r object', typeid)\r
@@ -709,9 +709,9 @@ class BaseProxy(object):
 \r
         if incref:\r
             self._incref()\r
-            \r
+\r
         util.register_after_fork(self, BaseProxy._after_fork)\r
-        \r
+\r
     def _connect(self):\r
         util.debug('making connection to manager')\r
         name = current_process().get_name()\r
@@ -720,7 +720,7 @@ class BaseProxy(object):
         conn = self._Client(self._token.address, authkey=self._authkey)\r
         dispatch(conn, None, 'accept_connection', (name,))\r
         self._tls.connection = conn\r
-        \r
+\r
     def _callmethod(self, methodname, args=(), kwds={}):\r
         '''\r
         Try to call a method of the referrent and return a copy of the result\r
@@ -735,7 +735,7 @@ class BaseProxy(object):
 \r
         conn.send((self._id, methodname, args, kwds))\r
         kind, result = conn.recv()\r
-        \r
+\r
         if kind == '#RETURN':\r
             return result\r
         elif kind == '#PROXY':\r
@@ -793,7 +793,7 @@ class BaseProxy(object):
                        threading.current_thread().get_name())\r
             tls.connection.close()\r
             del tls.connection\r
-            \r
+\r
     def _after_fork(self):\r
         self._manager = None\r
         try:\r
@@ -806,7 +806,7 @@ class BaseProxy(object):
         kwds = {}\r
         if Popen.thread_is_spawning():\r
             kwds['authkey'] = self._authkey\r
-        \r
+\r
         if getattr(self, '_isauto', False):\r
             kwds['exposed'] = self._exposed_\r
             return (RebuildProxy,\r
@@ -817,7 +817,7 @@ class BaseProxy(object):
 \r
     def __deepcopy__(self, memo):\r
         return self._getvalue()\r
-    \r
+\r
     def __repr__(self):\r
         return '<%s object, typeid %r at %s>' % \\r
                (type(self).__name__, self._token.typeid, '0x%x' % id(self))\r
@@ -842,7 +842,7 @@ def RebuildProxy(func, token, serializer, kwds):
     If possible the shared object is returned, or otherwise a proxy for it.\r
     '''\r
     server = getattr(current_process(), '_manager_server', None)\r
-    \r
+\r
     if server and server.address == token.address:\r
         return server.id_to_obj[token.id][0]\r
     else:\r
@@ -884,7 +884,7 @@ def AutoProxy(token, serializer, manager=None, authkey=None,
     Return an auto-proxy for `token`\r
     '''\r
     _Client = listener_client[serializer][1]\r
-    \r
+\r
     if exposed is None:\r
         conn = _Client(token.address, authkey=authkey)\r
         try:\r
@@ -995,7 +995,7 @@ class NamespaceProxy(BaseProxy):
         if key[0] == '_':\r
             return object.__getattribute__(self, key)\r
         callmethod = object.__getattribute__(self, '_callmethod')\r
-        return callmethod('__getattribute__', (key,))    \r
+        return callmethod('__getattribute__', (key,))\r
     def __setattr__(self, key, value):\r
         if key[0] == '_':\r
             return object.__setattr__(self, key, value)\r
@@ -1007,7 +1007,7 @@ class NamespaceProxy(BaseProxy):
         callmethod = object.__getattribute__(self, '_callmethod')\r
         return callmethod('__delattr__', (key,))\r
 \r
-    \r
+\r
 class ValueProxy(BaseProxy):\r
     _exposed_ = ('get', 'set')\r
     def get(self):\r
@@ -1063,10 +1063,10 @@ PoolProxy._method_to_typeid_ = {
 class SyncManager(BaseManager):\r
     '''\r
     Subclass of `BaseManager` which supports a number of shared object types.\r
-    \r
+\r
     The types registered are those intended for the synchronization\r
     of threads, plus `dict`, `list` and `Namespace`.\r
-    \r
+\r
     The `multiprocessing.Manager()` function creates started instances of\r
     this class.\r
     '''\r
index 0255c866535d07b61339614427180c5e9e909bb1..79f0a2929ca2c63140f2af297b4517f0b42296fc 100644 (file)
@@ -58,18 +58,18 @@ def worker(inqueue, outqueue, initializer=None, initargs=()):
         except (EOFError, IOError):\r
             debug('worker got EOFError or IOError -- exiting')\r
             break\r
-        \r
+\r
         if task is None:\r
             debug('worker got sentinel -- exiting')\r
             break\r
-            \r
+\r
         job, i, func, args, kwds = task\r
         try:\r
             result = (True, func(*args, **kwds))\r
         except Exception, e:\r
             result = (False, e)\r
         put((job, i, result))\r
-    \r
+\r
 #\r
 # Class representing a process pool\r
 #\r
@@ -91,7 +91,7 @@ class Pool(object):
                 processes = cpu_count()\r
             except NotImplementedError:\r
                 processes = 1\r
-            \r
+\r
         self._pool = []\r
         for i in range(processes):\r
             w = self.Process(\r
@@ -102,7 +102,7 @@ class Pool(object):
             w.set_name(w.get_name().replace('Process', 'PoolWorker'))\r
             w.set_daemon(True)\r
             w.start()\r
-            \r
+\r
         self._task_handler = threading.Thread(\r
             target=Pool._handle_tasks,\r
             args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)\r
@@ -132,7 +132,7 @@ class Pool(object):
         self._outqueue = SimpleQueue()\r
         self._quick_put = self._inqueue._writer.send\r
         self._quick_get = self._outqueue._reader.recv\r
-        \r
+\r
     def apply(self, func, args=(), kwds={}):\r
         '''\r
         Equivalent of `apply()` builtin\r
@@ -182,7 +182,7 @@ class Pool(object):
             self._taskqueue.put((((result._job, i, mapstar, (x,), {})\r
                      for i, x in enumerate(task_batches)), result._set_length))\r
             return (item for chunk in result for item in chunk)\r
-            \r
+\r
     def apply_async(self, func, args=(), kwds={}, callback=None):\r
         '''\r
         Asynchronous equivalent of `apply()` builtin\r
@@ -199,12 +199,12 @@ class Pool(object):
         assert self._state == RUN\r
         if not hasattr(iterable, '__len__'):\r
             iterable = list(iterable)\r
-        \r
+\r
         if chunksize is None:\r
             chunksize, extra = divmod(len(iterable), len(self._pool) * 4)\r
             if extra:\r
                 chunksize += 1\r
-                \r
+\r
         task_batches = Pool._get_tasks(func, iterable, chunksize)\r
         result = MapResult(self._cache, chunksize, len(iterable), callback)\r
         self._taskqueue.put((((result._job, i, mapstar, (x,), {})\r
@@ -234,13 +234,13 @@ class Pool(object):
             break\r
         else:\r
             debug('task handler got sentinel')\r
-            \r
+\r
 \r
         try:\r
             # tell result handler to finish when cache is empty\r
             debug('task handler sending sentinel to result handler')\r
             outqueue.put(None)\r
-            \r
+\r
             # tell workers there is no more work\r
             debug('task handler sending sentinel to workers')\r
             for p in pool:\r
@@ -260,12 +260,12 @@ class Pool(object):
             except (IOError, EOFError):\r
                 debug('result handler got EOFError/IOError -- exiting')\r
                 return\r
-            \r
+\r
             if thread._state:\r
                 assert thread._state == TERMINATE\r
                 debug('result handler found thread._state=TERMINATE')\r
                 break\r
-            \r
+\r
             if task is None:\r
                 debug('result handler got sentinel')\r
                 break\r
@@ -321,7 +321,7 @@ class Pool(object):
         raise NotImplementedError(\r
               'pool objects cannot be passed between processes or pickled'\r
               )\r
-    \r
+\r
     def close(self):\r
         debug('closing pool')\r
         if self._state == RUN:\r
@@ -355,7 +355,7 @@ class Pool(object):
                         task_handler, result_handler, cache):\r
         # this is guaranteed to only be called once\r
         debug('finalizing pool')\r
-        \r
+\r
         task_handler._state = TERMINATE\r
         taskqueue.put(None)                 # sentinel\r
 \r
@@ -363,7 +363,7 @@ class Pool(object):
         cls._help_stuff_finish(inqueue, task_handler, len(pool))\r
 \r
         assert result_handler.is_alive() or len(cache) == 0\r
-        \r
+\r
         result_handler._state = TERMINATE\r
         outqueue.put(None)                  # sentinel\r
 \r
@@ -396,14 +396,14 @@ class ApplyResult(object):
         self._ready = False\r
         self._callback = callback\r
         cache[self._job] = self\r
-        \r
+\r
     def ready(self):\r
         return self._ready\r
-    \r
+\r
     def successful(self):\r
         assert self._ready\r
         return self._success\r
-    \r
+\r
     def wait(self, timeout=None):\r
         self._cond.acquire()\r
         try:\r
@@ -438,7 +438,7 @@ class ApplyResult(object):
 #\r
 \r
 class MapResult(ApplyResult):\r
-    \r
+\r
     def __init__(self, cache, chunksize, length, callback):\r
         ApplyResult.__init__(self, cache, callback)\r
         self._success = True\r
@@ -449,7 +449,7 @@ class MapResult(ApplyResult):
             self._ready = True\r
         else:\r
             self._number_left = length//chunksize + bool(length % chunksize)\r
-        \r
+\r
     def _set(self, i, success_result):\r
         success, result = success_result\r
         if success:\r
@@ -492,10 +492,10 @@ class IMapIterator(object):
         self._length = None\r
         self._unsorted = {}\r
         cache[self._job] = self\r
-        \r
+\r
     def __iter__(self):\r
         return self\r
-    \r
+\r
     def next(self, timeout=None):\r
         self._cond.acquire()\r
         try:\r
@@ -520,7 +520,7 @@ class IMapIterator(object):
         raise value\r
 \r
     __next__ = next                    # XXX\r
-    \r
+\r
     def _set(self, i, obj):\r
         self._cond.acquire()\r
         try:\r
@@ -534,12 +534,12 @@ class IMapIterator(object):
                 self._cond.notify()\r
             else:\r
                 self._unsorted[i] = obj\r
-                \r
+\r
             if self._index == self._length:\r
                 del self._cache[self._job]\r
         finally:\r
             self._cond.release()\r
-            \r
+\r
     def _set_length(self, length):\r
         self._cond.acquire()\r
         try:\r
@@ -572,18 +572,18 @@ class IMapUnorderedIterator(IMapIterator):
 #\r
 \r
 class ThreadPool(Pool):\r
-    \r
+\r
     from .dummy import Process\r
-    \r
+\r
     def __init__(self, processes=None, initializer=None, initargs=()):\r
         Pool.__init__(self, processes, initializer, initargs)\r
-        \r
+\r
     def _setup_queues(self):\r
         self._inqueue = Queue.Queue()\r
         self._outqueue = Queue.Queue()\r
         self._quick_put = self._inqueue.put\r
         self._quick_get = self._outqueue.get\r
-        \r
+\r
     @staticmethod\r
     def _help_stuff_finish(inqueue, task_handler, size):\r
         # put sentinels at head of inqueue to make workers finish\r
index 915d8634c621bf14c0a06d709820f9311629b942..43d8297672f0eca17c9a146dfee1449ab4839a96 100644 (file)
@@ -47,7 +47,7 @@ def active_children():
     '''\r
     _cleanup()\r
     return list(_current_process._children)\r
-    \r
+\r
 #\r
 #\r
 #\r
@@ -69,7 +69,7 @@ class Process(object):
     The class is analagous to `threading.Thread`\r
     '''\r
     _Popen = None\r
-    \r
+\r
     def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):\r
         assert group is None, 'group argument must be None for now'\r
         count = _current_process._counter.next()\r
@@ -91,7 +91,7 @@ class Process(object):
         '''\r
         if self._target:\r
             self._target(*self._args, **self._kwargs)\r
-            \r
+\r
     def start(self):\r
         '''\r
         Start child process\r
@@ -114,7 +114,7 @@ class Process(object):
         Terminate process; sends SIGTERM signal or uses TerminateProcess()\r
         '''\r
         self._popen.terminate()\r
-        \r
+\r
     def join(self, timeout=None):\r
         '''\r
         Wait until child process terminates\r
@@ -217,11 +217,11 @@ class Process(object):
                                    status, self._daemonic and ' daemon' or '')\r
 \r
     ##\r
-        \r
+\r
     def _bootstrap(self):\r
         from . import util\r
         global _current_process\r
-        \r
+\r
         try:\r
             self._children = set()\r
             self._counter = itertools.count(1)\r
index 78cb36225cf15806560f0710e6236dbeeb87e22d..ea8909055932f34f6a63723c6e47447923dba4f7 100644 (file)
@@ -41,9 +41,9 @@ class Queue(object):
         else:\r
             self._wlock = Lock()\r
         self._sem = BoundedSemaphore(maxsize)\r
-            \r
+\r
         self._after_fork()\r
-        \r
+\r
         if sys.platform != 'win32':\r
             register_after_fork(self, Queue._after_fork)\r
 \r
@@ -51,12 +51,12 @@ class Queue(object):
         assert_spawning(self)\r
         return (self._maxsize, self._reader, self._writer,\r
                 self._rlock, self._wlock, self._sem, self._opid)\r
-    \r
+\r
     def __setstate__(self, state):\r
         (self._maxsize, self._reader, self._writer,\r
          self._rlock, self._wlock, self._sem, self._opid) = state\r
         self._after_fork()\r
-        \r
+\r
     def _after_fork(self):\r
         debug('Queue._after_fork()')\r
         self._notempty = threading.Condition(threading.Lock())\r
@@ -69,7 +69,7 @@ class Queue(object):
         self._send = self._writer.send\r
         self._recv = self._reader.recv\r
         self._poll = self._reader.poll\r
-        \r
+\r
     def put(self, obj, block=True, timeout=None):\r
         assert not self._closed\r
         if not self._sem.acquire(block, timeout):\r
@@ -93,7 +93,7 @@ class Queue(object):
                 return res\r
             finally:\r
                 self._rlock.release()\r
-                \r
+\r
         else:\r
             if block:\r
                 deadline = time.time() + timeout\r
@@ -135,7 +135,7 @@ class Queue(object):
         assert self._closed\r
         if self._jointhread:\r
             self._jointhread()\r
-    \r
+\r
     def cancel_join_thread(self):\r
         debug('Queue.cancel_join_thread()')\r
         self._joincancelled = True\r
@@ -146,7 +146,7 @@ class Queue(object):
 \r
     def _start_thread(self):\r
         debug('Queue._start_thread()')\r
-        \r
+\r
         # Start thread which transfers data from buffer to pipe\r
         self._buffer.clear()\r
         self._thread = threading.Thread(\r
@@ -174,14 +174,14 @@ class Queue(object):
                 [weakref.ref(self._thread)],\r
                 exitpriority=-5\r
                 )\r
-            \r
+\r
         # Send sentinel to the thread queue object when garbage collected\r
         self._close = Finalize(\r
             self, Queue._finalize_close,\r
             [self._buffer, self._notempty],\r
             exitpriority=10\r
             )\r
-        \r
+\r
     @staticmethod\r
     def _finalize_join(twr):\r
         debug('joining queue thread')\r
@@ -191,7 +191,7 @@ class Queue(object):
             debug('... queue thread joined')\r
         else:\r
             debug('... queue thread already dead')\r
-            \r
+\r
     @staticmethod\r
     def _finalize_close(buffer, notempty):\r
         debug('telling queue thread to quit')\r
@@ -206,7 +206,7 @@ class Queue(object):
     def _feed(buffer, notempty, send, writelock, close):\r
         debug('starting thread to feed data to pipe')\r
         from .util import is_exiting\r
-        \r
+\r
         nacquire = notempty.acquire\r
         nrelease = notempty.release\r
         nwait = notempty.wait\r
@@ -217,7 +217,7 @@ class Queue(object):
             wrelease = writelock.release\r
         else:\r
             wacquire = None\r
-        \r
+\r
         try:\r
             while 1:\r
                 nacquire()\r
@@ -257,7 +257,7 @@ class Queue(object):
                     traceback.print_exc()\r
             except Exception:\r
                 pass\r
-            \r
+\r
 _sentinel = object()\r
 \r
 #\r
@@ -274,7 +274,7 @@ class JoinableQueue(Queue):
         Queue.__init__(self, maxsize)\r
         self._unfinished_tasks = Semaphore(0)\r
         self._cond = Condition()\r
-        \r
+\r
     def __getstate__(self):\r
         return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)\r
 \r
@@ -285,7 +285,7 @@ class JoinableQueue(Queue):
     def put(self, item, block=True, timeout=None):\r
         Queue.put(self, item, block, timeout)\r
         self._unfinished_tasks.release()\r
-        \r
+\r
     def task_done(self):\r
         self._cond.acquire()\r
         try:\r
@@ -295,7 +295,7 @@ class JoinableQueue(Queue):
                 self._cond.notify_all()\r
         finally:\r
             self._cond.release()\r
-            \r
+\r
     def join(self):\r
         self._cond.acquire()\r
         try:\r
index 0d6cf4f844b347a3a1de6604f3e5b879a75c06ba..17778ef803fbd80e1e3d463fc9421098d2c0d15d 100644 (file)
@@ -36,7 +36,7 @@ if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
 if sys.platform == 'win32':\r
     import _subprocess\r
     from ._multiprocessing import win32\r
-    \r
+\r
     def send_handle(conn, handle, destination_pid):\r
         process_handle = win32.OpenProcess(\r
             win32.PROCESS_ALL_ACCESS, False, destination_pid\r
@@ -46,14 +46,14 @@ if sys.platform == 'win32':
             conn.send(new_handle)\r
         finally:\r
             close(process_handle)\r
-            \r
+\r
     def recv_handle(conn):\r
         return conn.recv()\r
 \r
 else:\r
     def send_handle(conn, handle, destination_pid):\r
         _multiprocessing.sendfd(conn.fileno(), handle)\r
-        \r
+\r
     def recv_handle(conn):\r
         return _multiprocessing.recvfd(conn.fileno())\r
 \r
@@ -93,7 +93,7 @@ def _get_listener():
 \r
 def _serve():\r
     from .util import is_exiting, sub_warning\r
-    \r
+\r
     while 1:\r
         try:\r
             conn = _listener.accept()\r
@@ -109,7 +109,7 @@ def _serve():
                     'thread for sharing handles raised exception :\n' +\r
                     '-'*79 + '\n' + traceback.format_exc() + '-'*79\r
                     )\r
-    \r
+\r
 #\r
 # Functions to be used for pickling/unpickling objects with handles\r
 #\r
@@ -176,15 +176,15 @@ copy_reg.pickle(socket.socket, reduce_socket)
 #\r
 \r
 if sys.platform == 'win32':\r
-    \r
+\r
     def reduce_pipe_connection(conn):\r
         rh = reduce_handle(conn.fileno())\r
         return rebuild_pipe_connection, (rh, conn.readable, conn.writable)\r
-    \r
+\r
     def rebuild_pipe_connection(reduced_handle, readable, writable):\r
         handle = rebuild_handle(reduced_handle)\r
         return _multiprocessing.PipeConnection(\r
             handle, readable=readable, writable=writable\r
             )\r
-    \r
+\r
     copy_reg.pickle(_multiprocessing.PipeConnection, reduce_pipe_connection)\r
index 68772499f1eb0be6cb057ce91f2946297dd6245e..808d312306b6963df1e7231a3ce709a66d550926 100644 (file)
@@ -92,10 +92,10 @@ def copy(obj):
     new_obj = _new_value(type(obj))\r
     ctypes.pointer(new_obj)[0] = obj\r
     return new_obj\r
-    \r
+\r
 def synchronized(obj, lock=None):\r
     assert not isinstance(obj, SynchronizedBase), 'object already synchronized'\r
-    \r
+\r
     if isinstance(obj, ctypes._SimpleCData):\r
         return Synchronized(obj, lock)\r
     elif isinstance(obj, ctypes.Array):\r
@@ -123,7 +123,7 @@ def reduce_ctype(obj):
         return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)\r
     else:\r
         return rebuild_ctype, (type(obj), obj._wrapper, None)\r
-    \r
+\r
 def rebuild_ctype(type_, wrapper, length):\r
     if length is not None:\r
         type_ = type_ * length\r
@@ -170,7 +170,7 @@ class_cache = weakref.WeakKeyDictionary()
 #\r
 \r
 class SynchronizedBase(object):\r
-    \r
+\r
     def __init__(self, obj, lock=None):\r
         self._obj = obj\r
         self._lock = lock or RLock()\r
@@ -180,55 +180,55 @@ class SynchronizedBase(object):
     def __reduce__(self):\r
         assert_spawning(self)\r
         return synchronized, (self._obj, self._lock)\r
-    \r
+\r
     def get_obj(self):\r
         return self._obj\r
-    \r
+\r
     def get_lock(self):\r
         return self._lock\r
-    \r
+\r
     def __repr__(self):\r
         return '<%s wrapper for %s>' % (type(self).__name__, self._obj)\r
-    \r
-    \r
+\r
+\r
 class Synchronized(SynchronizedBase):\r
     value = make_property('value')\r
-    \r
-    \r
+\r
+\r
 class SynchronizedArray(SynchronizedBase):\r
-    \r
+\r
     def __len__(self):\r
         return len(self._obj)\r
-    \r
+\r
     def __getitem__(self, i):\r
         self.acquire()\r
         try:\r
             return self._obj[i]\r
         finally:\r
             self.release()\r
-            \r
+\r
     def __setitem__(self, i, value):\r
         self.acquire()\r
         try:\r
             self._obj[i] = value\r
         finally:\r
             self.release()\r
-            \r
+\r
     def __getslice__(self, start, stop):\r
         self.acquire()\r
         try:\r
             return self._obj[start:stop]\r
         finally:\r
             self.release()\r
-            \r
+\r
     def __setslice__(self, start, stop, values):\r
         self.acquire()\r
         try:\r
             self._obj[start:stop] = values\r
         finally:\r
             self.release()\r
-            \r
-            \r
+\r
+\r
 class SynchronizedString(SynchronizedArray):\r
     value = make_property('value')\r
     raw = make_property('raw')\r
index d6420322d3a3ecec2b3cc97bb2e29da5ce212cb7..6a7189a7b87ebc76f0f8d38ad8bf5eac55dd5e69 100644 (file)
@@ -38,7 +38,7 @@ class SemLock(object):
         sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)\r
         debug('created semlock with handle %s' % sl.handle)\r
         self._make_methods()\r
-        \r
+\r
         if sys.platform != 'win32':\r
             def _after_fork(obj):\r
                 obj._semlock._after_fork()\r
@@ -129,7 +129,7 @@ class RLock(SemLock):
 \r
     def __init__(self):\r
         SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)\r
-        \r
+\r
     def __repr__(self):\r
         try:\r
             if self._semlock._is_mine():\r
@@ -210,17 +210,17 @@ class Condition(object):
     def notify(self):\r
         assert self._lock._semlock._is_mine(), 'lock is not owned'\r
         assert not self._wait_semaphore.acquire(False)\r
-        \r
+\r
         # to take account of timeouts since last notify() we subtract\r
         # woken_count from sleeping_count and rezero woken_count\r
         while self._woken_count.acquire(False):\r
             res = self._sleeping_count.acquire(False)\r
             assert res\r
-            \r
+\r
         if self._sleeping_count.acquire(False): # try grabbing a sleeper\r
             self._wait_semaphore.release()      # wake up one sleeper\r
             self._woken_count.acquire()         # wait for the sleeper to wake\r
-            \r
+\r
             # rezero _wait_semaphore in case a timeout just happened\r
             self._wait_semaphore.acquire(False)\r
 \r
@@ -233,7 +233,7 @@ class Condition(object):
         while self._woken_count.acquire(False):\r
             res = self._sleeping_count.acquire(False)\r
             assert res\r
-            \r
+\r
         sleepers = 0\r
         while self._sleeping_count.acquire(False):\r
             self._wait_semaphore.release()        # wake up one sleeper\r
@@ -266,7 +266,7 @@ class Event(object):
             return False\r
         finally:\r
             self._cond.release()\r
-    \r
+\r
     def set(self):\r
         self._cond.acquire()\r
         try:\r
index d1b190c436a0230b154a24344c2492d81f31f73e..25ff8bd2965cc3c2f8c493162fde09b5d4ba72e8 100644 (file)
@@ -83,7 +83,7 @@ def _check_logger_class():
     import logging\r
     if hasattr(logging, 'multiprocessing'):\r
         return\r
-    \r
+\r
     logging._acquireLock()\r
     try:\r
         OldLoggerClass = logging.getLoggerClass()\r
index d75fd20fddcaa063460264fdb580f27740f1fb72..4d3527ca666509870daca5a5792ee56fa470dc34 100644 (file)
-#\r
-# Unit tests for the multiprocessing package\r
-#\r
-\r
-import unittest\r
-import threading\r
-import Queue\r
-import time\r
-import sys\r
-import os\r
-import gc\r
-import signal\r
-import array\r
-import copy\r
-import socket\r
-import random\r
-import logging\r
-\r
-import multiprocessing.dummy\r
-import multiprocessing.connection\r
-import multiprocessing.managers\r
-import multiprocessing.heap\r
-import multiprocessing.managers\r
-import multiprocessing.pool\r
-import _multiprocessing\r
-\r
-from multiprocessing import util\r
-\r
-#\r
-#\r
-#\r
-\r
-if sys.version_info >= (3, 0):\r
-    def latin(s):\r
-        return s.encode('latin')\r
-else:\r
-    latin = str\r
-\r
-try:\r
-    bytes\r
-except NameError:\r
-    bytes = str\r
-    def bytearray(seq):\r
-        return array.array('c', seq)\r
-\r
-#\r
-# Constants\r
-#\r
-\r
-LOG_LEVEL = util.SUBWARNING\r
-#LOG_LEVEL = logging.WARNING\r
-\r
-DELTA = 0.1\r
-CHECK_TIMINGS = False     # making true makes tests take a lot longer\r
-                          # and can sometimes cause some non-serious\r
-                          # failures because some calls block a bit\r
-                          # longer than expected\r
-if CHECK_TIMINGS:\r
-    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4\r
-else:\r
-    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1\r
-\r
-HAVE_GETVALUE = not getattr(_multiprocessing,\r
-                            'HAVE_BROKEN_SEM_GETVALUE', False)\r
-\r
-#\r
-# Creates a wrapper for a function which records the time it takes to finish\r
-#\r
-\r
-class TimingWrapper(object):\r
-\r
-    def __init__(self, func):\r
-        self.func = func\r
-        self.elapsed = None\r
-\r
-    def __call__(self, *args, **kwds):\r
-        t = time.time()\r
-        try:\r
-            return self.func(*args, **kwds)\r
-        finally:\r
-            self.elapsed = time.time() - t\r
-        \r
-#\r
-# Base class for test cases\r
-#\r
-\r
-class BaseTestCase(object):\r
-    \r
-    ALLOWED_TYPES = ('processes', 'manager', 'threads')\r
-\r
-    def assertTimingAlmostEqual(self, a, b):\r
-        if CHECK_TIMINGS:\r
-            self.assertAlmostEqual(a, b, 1)\r
-\r
-    def assertReturnsIfImplemented(self, value, func, *args):\r
-        try:\r
-            res = func(*args)\r
-        except NotImplementedError:\r
-            pass\r
-        else:\r
-            return self.assertEqual(value, res)\r
-\r
-#\r
-# Return the value of a semaphore\r
-#\r
-\r
-def get_value(self):\r
-    try:\r
-        return self.get_value()\r
-    except AttributeError:\r
-        try:\r
-            return self._Semaphore__value\r
-        except AttributeError:\r
-            try:\r
-                return self._value\r
-            except AttributeError:\r
-                raise NotImplementedError\r
-\r
-#\r
-# Testcases\r
-#\r
-\r
-class _TestProcess(BaseTestCase):\r
-    \r
-    ALLOWED_TYPES = ('processes', 'threads')\r
-    \r
-    def test_current(self):\r
-        if self.TYPE == 'threads':\r
-            return\r
-\r
-        current = self.current_process()\r
-        authkey = current.get_authkey()\r
-        \r
-        self.assertTrue(current.is_alive())\r
-        self.assertTrue(not current.is_daemon())        \r
-        self.assertTrue(isinstance(authkey, bytes))\r
-        self.assertTrue(len(authkey) > 0)\r
-        self.assertEqual(current.get_ident(), os.getpid())\r
-        self.assertEqual(current.get_exitcode(), None)\r
-\r
-    def _test(self, q, *args, **kwds):\r
-        current = self.current_process()\r
-        q.put(args)\r
-        q.put(kwds)\r
-        q.put(current.get_name())\r
-        if self.TYPE != 'threads':\r
-            q.put(bytes(current.get_authkey()))\r
-            q.put(current.pid)\r
-\r
-    def test_process(self):\r
-        q = self.Queue(1)\r
-        e = self.Event()\r
-        args = (q, 1, 2)\r
-        kwargs = {'hello':23, 'bye':2.54}\r
-        name = 'SomeProcess'\r
-        p = self.Process(\r
-            target=self._test, args=args, kwargs=kwargs, name=name\r
-            )\r
-        p.set_daemon(True)\r
-        current = self.current_process()\r
-\r
-        if self.TYPE != 'threads':\r
-            self.assertEquals(p.get_authkey(), current.get_authkey())\r
-        self.assertEquals(p.is_alive(), False)\r
-        self.assertEquals(p.is_daemon(), True)\r
-        self.assertTrue(p not in self.active_children())\r
-        self.assertTrue(type(self.active_children()) is list)\r
-        self.assertEqual(p.get_exitcode(), None)\r
-        \r
-        p.start()\r
-        \r
-        self.assertEquals(p.get_exitcode(), None)\r
-        self.assertEquals(p.is_alive(), True)\r
-        self.assertTrue(p in self.active_children())\r
-        \r
-        self.assertEquals(q.get(), args[1:])\r
-        self.assertEquals(q.get(), kwargs)\r
-        self.assertEquals(q.get(), p.get_name())\r
-        if self.TYPE != 'threads':\r
-            self.assertEquals(q.get(), current.get_authkey())\r
-            self.assertEquals(q.get(), p.pid)\r
-\r
-        p.join()\r
-\r
-        self.assertEquals(p.get_exitcode(), 0)\r
-        self.assertEquals(p.is_alive(), False)\r
-        self.assertTrue(p not in self.active_children())        \r
-\r
-    def _test_terminate(self):\r
-        time.sleep(1000)\r
-\r
-    def test_terminate(self):\r
-        if self.TYPE == 'threads':\r
-            return\r
-        \r
-        p = self.Process(target=self._test_terminate)\r
-        p.set_daemon(True)\r
-        p.start()\r
-\r
-        self.assertEqual(p.is_alive(), True)\r
-        self.assertTrue(p in self.active_children())\r
-        self.assertEqual(p.get_exitcode(), None)\r
-\r
-        p.terminate()\r
-\r
-        join = TimingWrapper(p.join)\r
-        self.assertEqual(join(), None)\r
-        self.assertTimingAlmostEqual(join.elapsed, 0.0)\r
-        \r
-        self.assertEqual(p.is_alive(), False)\r
-        self.assertTrue(p not in self.active_children())\r
-\r
-        p.join()\r
-\r
-        # XXX sometimes get p.get_exitcode() == 0 on Windows ...\r
-        #self.assertEqual(p.get_exitcode(), -signal.SIGTERM)\r
-\r
-    def test_cpu_count(self):\r
-        try:\r
-            cpus = multiprocessing.cpu_count()\r
-        except NotImplementedError:\r
-            cpus = 1\r
-        self.assertTrue(type(cpus) is int)\r
-        self.assertTrue(cpus >= 1)\r
-\r
-    def test_active_children(self):\r
-        self.assertEqual(type(self.active_children()), list)\r
-\r
-        p = self.Process(target=time.sleep, args=(DELTA,))\r
-        self.assertTrue(p not in self.active_children())\r
-        \r
-        p.start()\r
-        self.assertTrue(p in self.active_children())\r
-\r
-        p.join()\r
-        self.assertTrue(p not in self.active_children())\r
-\r
-    def _test_recursion(self, wconn, id):\r
-        from multiprocessing import forking\r
-        wconn.send(id)\r
-        if len(id) < 2:\r
-            for i in range(2):\r
-                p = self.Process(\r
-                    target=self._test_recursion, args=(wconn, id+[i])\r
-                    )\r
-                p.start()\r
-                p.join()\r
-\r
-    def test_recursion(self):\r
-        rconn, wconn = self.Pipe(duplex=False)\r
-        self._test_recursion(wconn, [])\r
-        \r
-        time.sleep(DELTA)\r
-        result = []\r
-        while rconn.poll():\r
-            result.append(rconn.recv())\r
-            \r
-        expected = [\r
-            [],\r
-              [0],\r
-                [0, 0],\r
-                [0, 1],\r
-              [1],\r
-                [1, 0],\r
-                [1, 1]\r
-            ]\r
-        self.assertEqual(result, expected)\r
-\r
-#\r
-#\r
-#\r
-\r
-class _UpperCaser(multiprocessing.Process):\r
-\r
-    def __init__(self):\r
-        multiprocessing.Process.__init__(self)\r
-        self.child_conn, self.parent_conn = multiprocessing.Pipe()\r
-\r
-    def run(self):\r
-        self.parent_conn.close()\r
-        for s in iter(self.child_conn.recv, None):\r
-            self.child_conn.send(s.upper())\r
-        self.child_conn.close()\r
-\r
-    def submit(self, s):\r
-        assert type(s) is str\r
-        self.parent_conn.send(s)\r
-        return self.parent_conn.recv()\r
-\r
-    def stop(self):\r
-        self.parent_conn.send(None)\r
-        self.parent_conn.close()\r
-        self.child_conn.close()\r
-\r
-class _TestSubclassingProcess(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('processes',)\r
-\r
-    def test_subclassing(self):\r
-        uppercaser = _UpperCaser()\r
-        uppercaser.start()\r
-        self.assertEqual(uppercaser.submit('hello'), 'HELLO')\r
-        self.assertEqual(uppercaser.submit('world'), 'WORLD')\r
-        uppercaser.stop()\r
-        uppercaser.join()\r
-        \r
-#\r
-#\r
-#\r
-\r
-def queue_empty(q):\r
-    if hasattr(q, 'empty'):\r
-        return q.empty()\r
-    else:\r
-        return q.qsize() == 0\r
-\r
-def queue_full(q, maxsize):\r
-    if hasattr(q, 'full'):\r
-        return q.full()\r
-    else:\r
-        return q.qsize() == maxsize\r
-\r
-\r
-class _TestQueue(BaseTestCase):\r
-\r
-\r
-    def _test_put(self, queue, child_can_start, parent_can_continue):\r
-        child_can_start.wait()\r
-        for i in range(6):\r
-            queue.get()\r
-        parent_can_continue.set()\r
-\r
-    def test_put(self):\r
-        MAXSIZE = 6\r
-        queue = self.Queue(maxsize=MAXSIZE)\r
-        child_can_start = self.Event()\r
-        parent_can_continue = self.Event()\r
-\r
-        proc = self.Process(\r
-            target=self._test_put,\r
-            args=(queue, child_can_start, parent_can_continue)\r
-            )\r
-        proc.set_daemon(True)\r
-        proc.start()\r
-        \r
-        self.assertEqual(queue_empty(queue), True)\r
-        self.assertEqual(queue_full(queue, MAXSIZE), False)\r
-\r
-        queue.put(1)\r
-        queue.put(2, True)\r
-        queue.put(3, True, None)\r
-        queue.put(4, False)\r
-        queue.put(5, False, None)\r
-        queue.put_nowait(6)\r
-\r
-        # the values may be in buffer but not yet in pipe so sleep a bit\r
-        time.sleep(DELTA)     \r
-\r
-        self.assertEqual(queue_empty(queue), False)\r
-        self.assertEqual(queue_full(queue, MAXSIZE), True)\r
-\r
-        put = TimingWrapper(queue.put)\r
-        put_nowait = TimingWrapper(queue.put_nowait)\r
-\r
-        self.assertRaises(Queue.Full, put, 7, False)\r
-        self.assertTimingAlmostEqual(put.elapsed, 0)\r
-\r
-        self.assertRaises(Queue.Full, put, 7, False, None)\r
-        self.assertTimingAlmostEqual(put.elapsed, 0)\r
-\r
-        self.assertRaises(Queue.Full, put_nowait, 7)\r
-        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)\r
-\r
-        self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)\r
-        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)\r
-\r
-        self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)\r
-        self.assertTimingAlmostEqual(put.elapsed, 0)\r
-\r
-        self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)\r
-        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)\r
-\r
-        child_can_start.set()\r
-        parent_can_continue.wait()\r
-\r
-        self.assertEqual(queue_empty(queue), True)\r
-        self.assertEqual(queue_full(queue, MAXSIZE), False)\r
-\r
-        proc.join()\r
-\r
-    def _test_get(self, queue, child_can_start, parent_can_continue):\r
-        child_can_start.wait()\r
-        queue.put(1)\r
-        queue.put(2)\r
-        queue.put(3)\r
-        queue.put(4)\r
-        queue.put(5)\r
-        parent_can_continue.set()\r
-        \r
-    def test_get(self):\r
-        queue = self.Queue()\r
-        child_can_start = self.Event()\r
-        parent_can_continue = self.Event()\r
-        \r
-        proc = self.Process(\r
-            target=self._test_get,\r
-            args=(queue, child_can_start, parent_can_continue)\r
-            )\r
-        proc.set_daemon(True)\r
-        proc.start()\r
-        \r
-        self.assertEqual(queue_empty(queue), True)\r
-        \r
-        child_can_start.set()\r
-        parent_can_continue.wait()\r
-\r
-        time.sleep(DELTA)\r
-        self.assertEqual(queue_empty(queue), False)\r
-\r
-        self.assertEqual(queue.get(), 1)\r
-        self.assertEqual(queue.get(True, None), 2)\r
-        self.assertEqual(queue.get(True), 3)\r
-        self.assertEqual(queue.get(timeout=1), 4)\r
-        self.assertEqual(queue.get_nowait(), 5)\r
-        \r
-        self.assertEqual(queue_empty(queue), True)\r
-\r
-        get = TimingWrapper(queue.get)\r
-        get_nowait = TimingWrapper(queue.get_nowait)\r
-        \r
-        self.assertRaises(Queue.Empty, get, False)\r
-        self.assertTimingAlmostEqual(get.elapsed, 0)\r
-\r
-        self.assertRaises(Queue.Empty, get, False, None)\r
-        self.assertTimingAlmostEqual(get.elapsed, 0)\r
-\r
-        self.assertRaises(Queue.Empty, get_nowait)\r
-        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)\r
-\r
-        self.assertRaises(Queue.Empty, get, True, TIMEOUT1)\r
-        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)\r
-\r
-        self.assertRaises(Queue.Empty, get, False, TIMEOUT2)\r
-        self.assertTimingAlmostEqual(get.elapsed, 0)\r
-\r
-        self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)\r
-        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)\r
-\r
-        proc.join()\r
-        \r
-    def _test_fork(self, queue):\r
-        for i in range(10, 20):\r
-            queue.put(i)\r
-        # note that at this point the items may only be buffered, so the\r
-        # process cannot shutdown until the feeder thread has finished\r
-        # pushing items onto the pipe.\r
-\r
-    def test_fork(self):\r
-        # Old versions of Queue would fail to create a new feeder\r
-        # thread for a forked process if the original process had its\r
-        # own feeder thread.  This test checks that this no longer\r
-        # happens.\r
-\r
-        queue = self.Queue()\r
-\r
-        # put items on queue so that main process starts a feeder thread\r
-        for i in range(10):\r
-            queue.put(i)\r
-\r
-        # wait to make sure thread starts before we fork a new process\r
-        time.sleep(DELTA)\r
-\r
-        # fork process\r
-        p = self.Process(target=self._test_fork, args=(queue,))\r
-        p.start()\r
-\r
-        # check that all expected items are in the queue\r
-        for i in range(20):\r
-            self.assertEqual(queue.get(), i)\r
-        self.assertRaises(Queue.Empty, queue.get, False)\r
-\r
-        p.join()\r
-\r
-    def test_qsize(self):\r
-        q = self.Queue()\r
-        try:\r
-            self.assertEqual(q.qsize(), 0)\r
-        except NotImplementedError:\r
-            return\r
-        q.put(1)\r
-        self.assertEqual(q.qsize(), 1)\r
-        q.put(5)\r
-        self.assertEqual(q.qsize(), 2)\r
-        q.get()\r
-        self.assertEqual(q.qsize(), 1)\r
-        q.get()\r
-        self.assertEqual(q.qsize(), 0)\r
-\r
-    def _test_task_done(self, q):\r
-        for obj in iter(q.get, None):\r
-            time.sleep(DELTA)\r
-            q.task_done()\r
-\r
-    def test_task_done(self):\r
-        queue = self.JoinableQueue()\r
-\r
-        if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):\r
-            return\r
-\r
-        workers = [self.Process(target=self._test_task_done, args=(queue,))\r
-                   for i in xrange(4)]\r
-        \r
-        for p in workers:\r
-            p.start()\r
-\r
-        for i in xrange(10):\r
-            queue.put(i)\r
-\r
-        queue.join()\r
-\r
-        for p in workers:\r
-            queue.put(None)\r
-        \r
-        for p in workers:\r
-            p.join()\r
-\r
-#\r
-#\r
-#\r
-\r
-class _TestLock(BaseTestCase):\r
-\r
-    def test_lock(self):\r
-        lock = self.Lock()\r
-        self.assertEqual(lock.acquire(), True)\r
-        self.assertEqual(lock.acquire(False), False)\r
-        self.assertEqual(lock.release(), None)\r
-        self.assertRaises((ValueError, threading.ThreadError), lock.release)\r
-\r
-    def test_rlock(self):\r
-        lock = self.RLock()\r
-        self.assertEqual(lock.acquire(), True)\r
-        self.assertEqual(lock.acquire(), True)\r
-        self.assertEqual(lock.acquire(), True)\r
-        self.assertEqual(lock.release(), None)\r
-        self.assertEqual(lock.release(), None)\r
-        self.assertEqual(lock.release(), None)\r
-        self.assertRaises((AssertionError, RuntimeError), lock.release)\r
-        \r
-        \r
-class _TestSemaphore(BaseTestCase):\r
-\r
-    def _test_semaphore(self, sem):\r
-        self.assertReturnsIfImplemented(2, get_value, sem)\r
-        self.assertEqual(sem.acquire(), True)\r
-        self.assertReturnsIfImplemented(1, get_value, sem)\r
-        self.assertEqual(sem.acquire(), True)\r
-        self.assertReturnsIfImplemented(0, get_value, sem)\r
-        self.assertEqual(sem.acquire(False), False)\r
-        self.assertReturnsIfImplemented(0, get_value, sem)\r
-        self.assertEqual(sem.release(), None)\r
-        self.assertReturnsIfImplemented(1, get_value, sem)\r
-        self.assertEqual(sem.release(), None)\r
-        self.assertReturnsIfImplemented(2, get_value, sem)\r
-        \r
-    def test_semaphore(self):\r
-        sem = self.Semaphore(2)\r
-        self._test_semaphore(sem)\r
-        self.assertEqual(sem.release(), None)\r
-        self.assertReturnsIfImplemented(3, get_value, sem)\r
-        self.assertEqual(sem.release(), None)\r
-        self.assertReturnsIfImplemented(4, get_value, sem)\r
-\r
-    def test_bounded_semaphore(self):\r
-        sem = self.BoundedSemaphore(2)\r
-        self._test_semaphore(sem)\r
-        # Currently fails on OS/X\r
-        #if HAVE_GETVALUE:\r
-        #    self.assertRaises(ValueError, sem.release)\r
-        #    self.assertReturnsIfImplemented(2, get_value, sem)\r
-\r
-    def test_timeout(self):\r
-        if self.TYPE != 'processes':\r
-            return\r
-\r
-        sem = self.Semaphore(0)\r
-        acquire = TimingWrapper(sem.acquire)\r
-\r
-        self.assertEqual(acquire(False), False)\r
-        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)\r
-\r
-        self.assertEqual(acquire(False, None), False)\r
-        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)\r
-\r
-        self.assertEqual(acquire(False, TIMEOUT1), False)\r
-        self.assertTimingAlmostEqual(acquire.elapsed, 0)\r
-\r
-        self.assertEqual(acquire(True, TIMEOUT2), False)\r
-        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)\r
-\r
-        self.assertEqual(acquire(timeout=TIMEOUT3), False)\r
-        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)\r
-\r
-\r
-class _TestCondition(BaseTestCase):\r
-    \r
-    def f(self, cond, sleeping, woken, timeout=None):\r
-        cond.acquire()\r
-        sleeping.release()\r
-        cond.wait(timeout)\r
-        woken.release()\r
-        cond.release()\r
-    \r
-    def check_invariant(self, cond):\r
-        # this is only supposed to succeed when there are no sleepers\r
-        if self.TYPE == 'processes':\r
-            try:\r
-                sleepers = (cond._sleeping_count.get_value() -\r
-                            cond._woken_count.get_value())\r
-                self.assertEqual(sleepers, 0)\r
-                self.assertEqual(cond._wait_semaphore.get_value(), 0)\r
-            except NotImplementedError:\r
-                pass\r
-            \r
-    def test_notify(self):\r
-        cond = self.Condition()\r
-        sleeping = self.Semaphore(0)\r
-        woken = self.Semaphore(0)\r
-        \r
-        p = self.Process(target=self.f, args=(cond, sleeping, woken))\r
-        p.set_daemon(True)\r
-        p.start()\r
-\r
-        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))\r
-        p.set_daemon(True)\r
-        p.start()\r
-        \r
-        # wait for both children to start sleeping\r
-        sleeping.acquire()\r
-        sleeping.acquire()\r
-        \r
-        # check no process/thread has woken up\r
-        time.sleep(DELTA)\r
-        self.assertReturnsIfImplemented(0, get_value, woken)\r
-\r
-        # wake up one process/thread\r
-        cond.acquire()\r
-        cond.notify()\r
-        cond.release()\r
-        \r
-        # check one process/thread has woken up\r
-        time.sleep(DELTA)\r
-        self.assertReturnsIfImplemented(1, get_value, woken)\r
-\r
-        # wake up another\r
-        cond.acquire()\r
-        cond.notify()\r
-        cond.release()\r
-        \r
-        # check other has woken up\r
-        time.sleep(DELTA)\r
-        self.assertReturnsIfImplemented(2, get_value, woken)\r
-        \r
-        # check state is not mucked up\r
-        self.check_invariant(cond)\r
-        p.join()\r
-        \r
-    def test_notify_all(self):\r
-        cond = self.Condition()\r
-        sleeping = self.Semaphore(0)\r
-        woken = self.Semaphore(0)\r
-\r
-        # start some threads/processes which will timeout\r
-        for i in range(3):\r
-            p = self.Process(target=self.f,\r
-                             args=(cond, sleeping, woken, TIMEOUT1))\r
-            p.set_daemon(True)\r
-            p.start()\r
-\r
-            t = threading.Thread(target=self.f,\r
-                                 args=(cond, sleeping, woken, TIMEOUT1))\r
-            t.set_daemon(True)\r
-            t.start()\r
-\r
-        # wait for them all to sleep\r
-        for i in xrange(6):\r
-            sleeping.acquire()\r
-\r
-        # check they have all timed out\r
-        for i in xrange(6):\r
-            woken.acquire()\r
-        self.assertReturnsIfImplemented(0, get_value, woken)\r
-\r
-        # check state is not mucked up\r
-        self.check_invariant(cond)\r
-\r
-        # start some more threads/processes\r
-        for i in range(3):\r
-            p = self.Process(target=self.f, args=(cond, sleeping, woken))\r
-            p.set_daemon(True)\r
-            p.start()\r
-            \r
-            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))\r
-            t.set_daemon(True)\r
-            t.start()\r
-            \r
-        # wait for them to all sleep\r
-        for i in xrange(6):\r
-            sleeping.acquire()\r
-            \r
-        # check no process/thread has woken up\r
-        time.sleep(DELTA)\r
-        self.assertReturnsIfImplemented(0, get_value, woken)\r
-\r
-        # wake them all up\r
-        cond.acquire()\r
-        cond.notify_all()\r
-        cond.release()\r
-\r
-        # check they have all woken\r
-        time.sleep(DELTA)\r
-        self.assertReturnsIfImplemented(6, get_value, woken)\r
-\r
-        # check state is not mucked up\r
-        self.check_invariant(cond)\r
-\r
-    def test_timeout(self):\r
-        cond = self.Condition()\r
-        wait = TimingWrapper(cond.wait)\r
-        cond.acquire()\r
-        res = wait(TIMEOUT1)\r
-        cond.release()\r
-        self.assertEqual(res, None)\r
-        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)\r
-\r
-        \r
-class _TestEvent(BaseTestCase):\r
-\r
-    def _test_event(self, event):\r
-        time.sleep(TIMEOUT2)\r
-        event.set()\r
-\r
-    def test_event(self):\r
-        event = self.Event()\r
-        wait = TimingWrapper(event.wait)\r
-        \r
-        # Removed temporaily, due to API shear, this does not \r
-        # work with threading._Event objects. is_set == isSet\r
-        #self.assertEqual(event.is_set(), False)\r
-        \r
-        self.assertEqual(wait(0.0), None)\r
-        self.assertTimingAlmostEqual(wait.elapsed, 0.0)\r
-        self.assertEqual(wait(TIMEOUT1), None)\r
-        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)\r
-\r
-        event.set()\r
-\r
-        # See note above on the API differences\r
-        # self.assertEqual(event.is_set(), True)\r
-        self.assertEqual(wait(), None)\r
-        self.assertTimingAlmostEqual(wait.elapsed, 0.0)\r
-        self.assertEqual(wait(TIMEOUT1), None)\r
-        self.assertTimingAlmostEqual(wait.elapsed, 0.0)\r
-        # self.assertEqual(event.is_set(), True)\r
-\r
-        event.clear()\r
-\r
-        #self.assertEqual(event.is_set(), False)\r
-\r
-        self.Process(target=self._test_event, args=(event,)).start()\r
-        self.assertEqual(wait(), None)\r
-\r
-#\r
-#\r
-#\r
-\r
-class _TestValue(BaseTestCase):\r
-\r
-    codes_values = [\r
-        ('i', 4343, 24234),\r
-        ('d', 3.625, -4.25),\r
-        ('h', -232, 234),\r
-        ('c', latin('x'), latin('y'))\r
-        ]\r
-\r
-    def _test(self, values):\r
-        for sv, cv in zip(values, self.codes_values):\r
-            sv.value = cv[2]\r
-            \r
-        \r
-    def test_value(self, raw=False):\r
-        if self.TYPE != 'processes':\r
-            return\r
-\r
-        if raw:\r
-            values = [self.RawValue(code, value)\r
-                      for code, value, _ in self.codes_values]\r
-        else:\r
-            values = [self.Value(code, value)\r
-                      for code, value, _ in self.codes_values]\r
-            \r
-        for sv, cv in zip(values, self.codes_values):\r
-            self.assertEqual(sv.value, cv[1])\r
-        \r
-        proc = self.Process(target=self._test, args=(values,))\r
-        proc.start()\r
-        proc.join()\r
-\r
-        for sv, cv in zip(values, self.codes_values):\r
-            self.assertEqual(sv.value, cv[2])\r
-\r
-    def test_rawvalue(self):\r
-        self.test_value(raw=True)\r
-\r
-    def test_getobj_getlock(self):\r
-        if self.TYPE != 'processes':\r
-            return\r
-\r
-        val1 = self.Value('i', 5)\r
-        lock1 = val1.get_lock()\r
-        obj1 = val1.get_obj()\r
-\r
-        val2 = self.Value('i', 5, lock=None)\r
-        lock2 = val2.get_lock()\r
-        obj2 = val2.get_obj()\r
-\r
-        lock = self.Lock()\r
-        val3 = self.Value('i', 5, lock=lock)\r
-        lock3 = val3.get_lock()\r
-        obj3 = val3.get_obj()\r
-        self.assertEqual(lock, lock3)\r
-        \r
-        arr4 = self.RawValue('i', 5)\r
-        self.assertFalse(hasattr(arr4, 'get_lock'))\r
-        self.assertFalse(hasattr(arr4, 'get_obj'))\r
-\r
-\r
-class _TestArray(BaseTestCase):\r
-\r
-    def f(self, seq):\r
-        for i in range(1, len(seq)):\r
-            seq[i] += seq[i-1]\r
-\r
-    def test_array(self, raw=False):\r
-        if self.TYPE != 'processes':\r
-            return\r
-\r
-        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]\r
-        if raw:\r
-            arr = self.RawArray('i', seq)\r
-        else:\r
-            arr = self.Array('i', seq)\r
-        \r
-        self.assertEqual(len(arr), len(seq))\r
-        self.assertEqual(arr[3], seq[3])\r
-        self.assertEqual(list(arr[2:7]), list(seq[2:7]))\r
-        \r
-        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])\r
-        \r
-        self.assertEqual(list(arr[:]), seq)\r
-        \r
-        self.f(seq)\r
-        \r
-        p = self.Process(target=self.f, args=(arr,))\r
-        p.start()\r
-        p.join()\r
-        \r
-        self.assertEqual(list(arr[:]), seq)\r
-        \r
-    def test_rawarray(self):\r
-        self.test_array(raw=True)\r
-        \r
-    def test_getobj_getlock_obj(self):\r
-        if self.TYPE != 'processes':\r
-            return\r
-\r
-        arr1 = self.Array('i', range(10))\r
-        lock1 = arr1.get_lock()\r
-        obj1 = arr1.get_obj()\r
-\r
-        arr2 = self.Array('i', range(10), lock=None)\r
-        lock2 = arr2.get_lock()\r
-        obj2 = arr2.get_obj()\r
-\r
-        lock = self.Lock()\r
-        arr3 = self.Array('i', range(10), lock=lock)\r
-        lock3 = arr3.get_lock()\r
-        obj3 = arr3.get_obj()\r
-        self.assertEqual(lock, lock3)\r
-        \r
-        arr4 = self.RawArray('i', range(10))\r
-        self.assertFalse(hasattr(arr4, 'get_lock'))\r
-        self.assertFalse(hasattr(arr4, 'get_obj'))\r
-\r
-#\r
-#\r
-#\r
-\r
-class _TestContainers(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('manager',)\r
-\r
-    def test_list(self):\r
-        a = self.list(range(10))\r
-        self.assertEqual(a[:], range(10))\r
-        \r
-        b = self.list()\r
-        self.assertEqual(b[:], [])\r
-        \r
-        b.extend(range(5))\r
-        self.assertEqual(b[:], range(5))\r
-        \r
-        self.assertEqual(b[2], 2)\r
-        self.assertEqual(b[2:10], [2,3,4])\r
-\r
-        b *= 2\r
-        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])\r
-\r
-        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])\r
-\r
-        self.assertEqual(a[:], range(10))\r
-\r
-        d = [a, b]\r
-        e = self.list(d)\r
-        self.assertEqual(\r
-            e[:],\r
-            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]\r
-            )\r
-        \r
-        f = self.list([a])\r
-        a.append('hello')\r
-        self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])\r
-\r
-    def test_dict(self):\r
-        d = self.dict()\r
-        indices = range(65, 70)\r
-        for i in indices:\r
-            d[i] = chr(i)\r
-        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))\r
-        self.assertEqual(sorted(d.keys()), indices)\r
-        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])\r
-        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])\r
-        \r
-    def test_namespace(self):\r
-        n = self.Namespace()\r
-        n.name = 'Bob'\r
-        n.job = 'Builder'\r
-        n._hidden = 'hidden'\r
-        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))\r
-        del n.job\r
-        self.assertEqual(str(n), "Namespace(name='Bob')")\r
-        self.assertTrue(hasattr(n, 'name'))\r
-        self.assertTrue(not hasattr(n, 'job'))\r
-\r
-#\r
-#\r
-#\r
-\r
-def sqr(x, wait=0.0):\r
-    time.sleep(wait)\r
-    return x*x\r
-\r
-class _TestPool(BaseTestCase):\r
-\r
-    def test_apply(self):\r
-        papply = self.pool.apply\r
-        self.assertEqual(papply(sqr, (5,)), sqr(5))\r
-        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))\r
-\r
-    def test_map(self):\r
-        pmap = self.pool.map\r
-        self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))\r
-        self.assertEqual(pmap(sqr, range(100), chunksize=20),\r
-                         map(sqr, range(100)))\r
-        \r
-    def test_async(self):\r
-        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))\r
-        get = TimingWrapper(res.get)\r
-        self.assertEqual(get(), 49)\r
-        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)\r
-\r
-    def test_async_timeout(self):\r
-        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))\r
-        get = TimingWrapper(res.get)\r
-        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)\r
-        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)\r
-\r
-    def test_imap(self):\r
-        it = self.pool.imap(sqr, range(10))\r
-        self.assertEqual(list(it), map(sqr, range(10)))\r
-\r
-        it = self.pool.imap(sqr, range(10))\r
-        for i in range(10):\r
-            self.assertEqual(it.next(), i*i)\r
-        self.assertRaises(StopIteration, it.next)\r
-\r
-        it = self.pool.imap(sqr, range(1000), chunksize=100)\r
-        for i in range(1000):\r
-            self.assertEqual(it.next(), i*i)\r
-        self.assertRaises(StopIteration, it.next)\r
-\r
-    def test_imap_unordered(self):\r
-        it = self.pool.imap_unordered(sqr, range(1000))\r
-        self.assertEqual(sorted(it), map(sqr, range(1000)))\r
-\r
-        it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)\r
-        self.assertEqual(sorted(it), map(sqr, range(1000)))\r
-\r
-    def test_make_pool(self):\r
-        p = multiprocessing.Pool(3)\r
-        self.assertEqual(3, len(p._pool))\r
-        p.close()\r
-        p.join()\r
-\r
-    def test_terminate(self):\r
-        if self.TYPE == 'manager':\r
-            # On Unix a forked process increfs each shared object to\r
-            # which its parent process held a reference.  If the\r
-            # forked process gets terminated then there is likely to\r
-            # be a reference leak.  So to prevent\r
-            # _TestZZZNumberOfObjects from failing we skip this test\r
-            # when using a manager.\r
-            return\r
-\r
-        result = self.pool.map_async(\r
-            time.sleep, [0.1 for i in range(10000)], chunksize=1\r
-            )\r
-        self.pool.terminate()\r
-        join = TimingWrapper(self.pool.join)\r
-        join()\r
-        self.assertTrue(join.elapsed < 0.2)\r
-\r
-#\r
-# Test that manager has expected number of shared objects left\r
-#\r
-\r
-class _TestZZZNumberOfObjects(BaseTestCase):\r
-    # Because test cases are sorted alphabetically, this one will get\r
-    # run after all the other tests for the manager.  It tests that\r
-    # there have been no "reference leaks" for the manager's shared\r
-    # objects.  Note the comment in _TestPool.test_terminate().\r
-    ALLOWED_TYPES = ('manager',)\r
-\r
-    def test_number_of_objects(self):\r
-        EXPECTED_NUMBER = 1                # the pool object is still alive\r
-        multiprocessing.active_children()  # discard dead process objs\r
-        gc.collect()                       # do garbage collection\r
-        refs = self.manager._number_of_objects()\r
-        if refs != EXPECTED_NUMBER:\r
-            print self.manager._debugInfo()\r
-\r
-        self.assertEqual(refs, EXPECTED_NUMBER)\r
-\r
-#\r
-# Test of creating a customized manager class\r
-#\r
-\r
-from multiprocessing.managers import BaseManager, BaseProxy, RemoteError\r
-    \r
-class FooBar(object):\r
-    def f(self):\r
-        return 'f()'\r
-    def g(self):\r
-        raise ValueError\r
-    def _h(self):\r
-        return '_h()'\r
-    \r
-def baz():\r
-    for i in xrange(10):\r
-        yield i*i\r
-\r
-class IteratorProxy(BaseProxy):\r
-    _exposed_ = ('next', '__next__')\r
-    def __iter__(self):\r
-        return self\r
-    def next(self):\r
-        return self._callmethod('next')\r
-    def __next__(self):\r
-        return self._callmethod('__next__')\r
-\r
-class MyManager(BaseManager):\r
-    pass\r
-\r
-MyManager.register('Foo', callable=FooBar)\r
-MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))\r
-MyManager.register('baz', callable=baz, proxytype=IteratorProxy)\r
-\r
-\r
-class _TestMyManager(BaseTestCase):\r
-    \r
-    ALLOWED_TYPES = ('manager',)\r
-\r
-    def test_mymanager(self):\r
-        manager = MyManager()\r
-        manager.start()\r
-        \r
-        foo = manager.Foo()\r
-        bar = manager.Bar()\r
-        baz = manager.baz()\r
-        \r
-        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]\r
-        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]\r
-        \r
-        self.assertEqual(foo_methods, ['f', 'g'])\r
-        self.assertEqual(bar_methods, ['f', '_h'])\r
-        \r
-        self.assertEqual(foo.f(), 'f()')\r
-        self.assertRaises(ValueError, foo.g)\r
-        self.assertEqual(foo._callmethod('f'), 'f()')\r
-        self.assertRaises(RemoteError, foo._callmethod, '_h')\r
-        \r
-        self.assertEqual(bar.f(), 'f()')\r
-        self.assertEqual(bar._h(), '_h()')\r
-        self.assertEqual(bar._callmethod('f'), 'f()')\r
-        self.assertEqual(bar._callmethod('_h'), '_h()')\r
-        \r
-        self.assertEqual(list(baz), [i*i for i in range(10)])\r
-        \r
-        manager.shutdown()\r
-        \r
-#\r
-# Test of connecting to a remote server and using xmlrpclib for serialization\r
-#\r
-\r
-_queue = Queue.Queue()\r
-def get_queue():\r
-    return _queue\r
-\r
-class QueueManager(BaseManager):\r
-    '''manager class used by server process'''\r
-QueueManager.register('get_queue', callable=get_queue)\r
-\r
-class QueueManager2(BaseManager):\r
-    '''manager class which specifies the same interface as QueueManager'''\r
-QueueManager2.register('get_queue')\r
-\r
-\r
-SERIALIZER = 'xmlrpclib'\r
-\r
-class _TestRemoteManager(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('manager',)\r
-    \r
-    def _putter(self, address, authkey):\r
-        manager = QueueManager2(\r
-            address=address, authkey=authkey, serializer=SERIALIZER\r
-            )\r
-        manager.connect()\r
-        queue = manager.get_queue()\r
-        queue.put(('hello world', None, True, 2.25))\r
-\r
-    def test_remote(self):\r
-        authkey = os.urandom(32)\r
-\r
-        manager = QueueManager(\r
-            address=('localhost', 0), authkey=authkey, serializer=SERIALIZER\r
-            )\r
-        manager.start()\r
-\r
-        p = self.Process(target=self._putter, args=(manager.address, authkey))\r
-        p.start()\r
-        \r
-        manager2 = QueueManager2(\r
-            address=manager.address, authkey=authkey, serializer=SERIALIZER\r
-            )\r
-        manager2.connect()\r
-        queue = manager2.get_queue()\r
-        \r
-        # Note that xmlrpclib will deserialize object as a list not a tuple\r
-        self.assertEqual(queue.get(), ['hello world', None, True, 2.25])\r
-\r
-        # Because we are using xmlrpclib for serialization instead of\r
-        # pickle this will cause a serialization error.\r
-        self.assertRaises(Exception, queue.put, time.sleep)\r
-\r
-        # Make queue finalizer run before the server is stopped\r
-        del queue\r
-        manager.shutdown()\r
-\r
-#\r
-#\r
-#\r
-\r
-SENTINEL = latin('')\r
-\r
-class _TestConnection(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('processes', 'threads')\r
-\r
-    def _echo(self, conn):\r
-        for msg in iter(conn.recv_bytes, SENTINEL):\r
-            conn.send_bytes(msg)\r
-        conn.close()\r
-\r
-    def test_connection(self):\r
-        conn, child_conn = self.Pipe()\r
-        \r
-        p = self.Process(target=self._echo, args=(child_conn,))\r
-        p.set_daemon(True)\r
-        p.start()\r
-\r
-        seq = [1, 2.25, None]\r
-        msg = latin('hello world')\r
-        longmsg = msg * 10\r
-        arr = array.array('i', range(4))\r
-\r
-        if self.TYPE == 'processes':\r
-            self.assertEqual(type(conn.fileno()), int)\r
-\r
-        self.assertEqual(conn.send(seq), None)\r
-        self.assertEqual(conn.recv(), seq)\r
-\r
-        self.assertEqual(conn.send_bytes(msg), None)\r
-        self.assertEqual(conn.recv_bytes(), msg)\r
-\r
-        if self.TYPE == 'processes':\r
-            buffer = array.array('i', [0]*10)\r
-            expected = list(arr) + [0] * (10 - len(arr))\r
-            self.assertEqual(conn.send_bytes(arr), None)\r
-            self.assertEqual(conn.recv_bytes_into(buffer),\r
-                             len(arr) * buffer.itemsize)\r
-            self.assertEqual(list(buffer), expected)\r
-\r
-            buffer = array.array('i', [0]*10)\r
-            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))\r
-            self.assertEqual(conn.send_bytes(arr), None)\r
-            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),\r
-                             len(arr) * buffer.itemsize)\r
-            self.assertEqual(list(buffer), expected)\r
-\r
-            buffer = bytearray(latin(' ' * 40))\r
-            self.assertEqual(conn.send_bytes(longmsg), None)\r
-            try:\r
-                res = conn.recv_bytes_into(buffer)\r
-            except multiprocessing.BufferTooShort, e:\r
-                self.assertEqual(e.args, (longmsg,))\r
-            else:\r
-                self.fail('expected BufferTooShort, got %s' % res)\r
-\r
-        poll = TimingWrapper(conn.poll)\r
-\r
-        self.assertEqual(poll(), False)\r
-        self.assertTimingAlmostEqual(poll.elapsed, 0)\r
-\r
-        self.assertEqual(poll(TIMEOUT1), False)\r
-        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)\r
-\r
-        conn.send(None)\r
-\r
-        self.assertEqual(poll(TIMEOUT1), True)\r
-        self.assertTimingAlmostEqual(poll.elapsed, 0)\r
-        \r
-        self.assertEqual(conn.recv(), None)\r
-\r
-        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16Mb\r
-        conn.send_bytes(really_big_msg)\r
-        self.assertEqual(conn.recv_bytes(), really_big_msg)\r
-        \r
-        conn.send_bytes(SENTINEL)                          # tell child to quit\r
-        child_conn.close()\r
-\r
-        if self.TYPE == 'processes':\r
-            self.assertEqual(conn.readable, True)\r
-            self.assertEqual(conn.writable, True)\r
-            self.assertRaises(EOFError, conn.recv)\r
-            self.assertRaises(EOFError, conn.recv_bytes)\r
-\r
-        p.join()\r
-        \r
-    def test_duplex_false(self):\r
-        reader, writer = self.Pipe(duplex=False)\r
-        self.assertEqual(writer.send(1), None)\r
-        self.assertEqual(reader.recv(), 1)\r
-        if self.TYPE == 'processes':\r
-            self.assertEqual(reader.readable, True)\r
-            self.assertEqual(reader.writable, False)\r
-            self.assertEqual(writer.readable, False)\r
-            self.assertEqual(writer.writable, True)\r
-            self.assertRaises(IOError, reader.send, 2)\r
-            self.assertRaises(IOError, writer.recv)\r
-            self.assertRaises(IOError, writer.poll)\r
-\r
-    def test_spawn_close(self):\r
-        # We test that a pipe connection can be closed by parent\r
-        # process immediately after child is spawned.  On Windows this\r
-        # would have sometimes failed on old versions because\r
-        # child_conn would be closed before the child got a chance to\r
-        # duplicate it.\r
-        conn, child_conn = self.Pipe()\r
-        \r
-        p = self.Process(target=self._echo, args=(child_conn,))\r
-        p.start()\r
-        child_conn.close()    # this might complete before child initializes\r
-\r
-        msg = latin('hello')\r
-        conn.send_bytes(msg)\r
-        self.assertEqual(conn.recv_bytes(), msg)\r
-\r
-        conn.send_bytes(SENTINEL)\r
-        conn.close()\r
-        p.join()\r
-\r
-    def test_sendbytes(self):\r
-        if self.TYPE != 'processes':\r
-            return\r
-\r
-        msg = latin('abcdefghijklmnopqrstuvwxyz')\r
-        a, b = self.Pipe()\r
-        \r
-        a.send_bytes(msg)\r
-        self.assertEqual(b.recv_bytes(), msg)\r
-\r
-        a.send_bytes(msg, 5)\r
-        self.assertEqual(b.recv_bytes(), msg[5:])\r
-\r
-        a.send_bytes(msg, 7, 8)\r
-        self.assertEqual(b.recv_bytes(), msg[7:7+8])\r
-\r
-        a.send_bytes(msg, 26)\r
-        self.assertEqual(b.recv_bytes(), latin(''))\r
-\r
-        a.send_bytes(msg, 26, 0)\r
-        self.assertEqual(b.recv_bytes(), latin(''))\r
-\r
-        self.assertRaises(ValueError, a.send_bytes, msg, 27)\r
-        \r
-        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)\r
-        \r
-        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)\r
-\r
-        self.assertRaises(ValueError, a.send_bytes, msg, -1)\r
-\r
-        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)\r
-        \r
-\r
-class _TestListenerClient(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('processes', 'threads')\r
-\r
-    def _test(self, address):\r
-        conn = self.connection.Client(address)\r
-        conn.send('hello')\r
-        conn.close()\r
-\r
-    def test_listener_client(self):        \r
-        for family in self.connection.families:\r
-            l = self.connection.Listener(family=family)\r
-            p = self.Process(target=self._test, args=(l.address,))\r
-            p.set_daemon(True)\r
-            p.start()\r
-            conn = l.accept()\r
-            self.assertEqual(conn.recv(), 'hello')\r
-            p.join()\r
-            l.close()\r
-\r
-#\r
-# Test of sending connection and socket objects between processes\r
-#\r
-\r
-class _TestPicklingConnections(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('processes',)\r
-\r
-    def _listener(self, conn, families):\r
-        for fam in families:\r
-            l = self.connection.Listener(family=fam)\r
-            conn.send(l.address)\r
-            new_conn = l.accept()\r
-            conn.send(new_conn)\r
-\r
-        if self.TYPE == 'processes':\r
-            l = socket.socket()\r
-            l.bind(('localhost', 0))\r
-            conn.send(l.getsockname())\r
-            l.listen(1)\r
-            new_conn, addr = l.accept()\r
-            conn.send(new_conn)\r
-        \r
-        conn.recv()\r
-\r
-    def _remote(self, conn):\r
-        for (address, msg) in iter(conn.recv, None):\r
-            client = self.connection.Client(address)\r
-            client.send(msg.upper())\r
-            client.close()\r
-\r
-        if self.TYPE == 'processes':\r
-            address, msg = conn.recv()\r
-            client = socket.socket()\r
-            client.connect(address)\r
-            client.sendall(msg.upper())\r
-            client.close()\r
-\r
-        conn.close()\r
-\r
-    def test_pickling(self):\r
-        try:\r
-            multiprocessing.allow_connection_pickling()\r
-        except ImportError:\r
-            return\r
-        \r
-        families = self.connection.families\r
-\r
-        lconn, lconn0 = self.Pipe()\r
-        lp = self.Process(target=self._listener, args=(lconn0, families))\r
-        lp.start()\r
-        lconn0.close()\r
-\r
-        rconn, rconn0 = self.Pipe()\r
-        rp = self.Process(target=self._remote, args=(rconn0,))\r
-        rp.start()\r
-        rconn0.close()\r
-\r
-        for fam in families:\r
-            msg = ('This connection uses family %s' % fam).encode('ascii')\r
-            address = lconn.recv()\r
-            rconn.send((address, msg))\r
-            new_conn = lconn.recv()\r
-            self.assertEqual(new_conn.recv(), msg.upper())\r
-            \r
-        rconn.send(None)\r
-\r
-        if self.TYPE == 'processes':\r
-            msg = latin('This connection uses a normal socket')\r
-            address = lconn.recv()\r
-            rconn.send((address, msg))\r
-            if hasattr(socket, 'fromfd'):\r
-                new_conn = lconn.recv()\r
-                self.assertEqual(new_conn.recv(100), msg.upper())\r
-            else:\r
-                # XXX On Windows with Py2.6 need to backport fromfd()\r
-                discard = lconn.recv_bytes()\r
-                \r
-        lconn.send(None)\r
-        \r
-        rconn.close()\r
-        lconn.close()\r
-        \r
-        lp.join()\r
-        rp.join()\r
-\r
-#\r
-#\r
-#\r
-\r
-class _TestHeap(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('processes',)\r
-\r
-    def test_heap(self):\r
-        iterations = 5000\r
-        maxblocks = 50\r
-        blocks = []\r
-\r
-        # create and destroy lots of blocks of different sizes\r
-        for i in xrange(iterations):\r
-            size = int(random.lognormvariate(0, 1) * 1000)\r
-            b = multiprocessing.heap.BufferWrapper(size)\r
-            blocks.append(b)\r
-            if len(blocks) > maxblocks:\r
-                i = random.randrange(maxblocks)\r
-                del blocks[i]\r
-\r
-        # get the heap object\r
-        heap = multiprocessing.heap.BufferWrapper._heap\r
-\r
-        # verify the state of the heap\r
-        all = []\r
-        occupied = 0\r
-        for L in heap._len_to_seq.values():\r
-            for arena, start, stop in L:\r
-                all.append((heap._arenas.index(arena), start, stop,\r
-                            stop-start, 'free'))\r
-        for arena, start, stop in heap._allocated_blocks:\r
-            all.append((heap._arenas.index(arena), start, stop,\r
-                        stop-start, 'occupied'))\r
-            occupied += (stop-start)\r
-\r
-        all.sort()\r
-\r
-        for i in range(len(all)-1):\r
-            (arena, start, stop) = all[i][:3]\r
-            (narena, nstart, nstop) = all[i+1][:3]\r
-            self.assertTrue((arena != narena and nstart == 0) or\r
-                            (stop == nstart))\r
-            \r
-#\r
-#\r
-#\r
-\r
-try:\r
-    from ctypes import Structure, Value, copy, c_int, c_double\r
-except ImportError:\r
-    Structure = object\r
-    c_int = c_double = None\r
-\r
-class _Foo(Structure):\r
-    _fields_ = [\r
-        ('x', c_int),\r
-        ('y', c_double)\r
-        ]\r
-\r
-class _TestSharedCTypes(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('processes',)\r
-\r
-    def _double(self, x, y, foo, arr, string):\r
-        x.value *= 2\r
-        y.value *= 2\r
-        foo.x *= 2\r
-        foo.y *= 2\r
-        string.value *= 2\r
-        for i in range(len(arr)):\r
-            arr[i] *= 2\r
-\r
-    def test_sharedctypes(self, lock=False):\r
-        if c_int is None:\r
-            return\r
-        \r
-        x = Value('i', 7, lock=lock)\r
-        y = Value(ctypes.c_double, 1.0/3.0, lock=lock)\r
-        foo = Value(_Foo, 3, 2, lock=lock)\r
-        arr = Array('d', range(10), lock=lock)\r
-        string = Array('c', 20, lock=lock)\r
-        string.value = 'hello'\r
-\r
-        p = self.Process(target=self._double, args=(x, y, foo, arr, string))\r
-        p.start()\r
-        p.join()\r
-\r
-        self.assertEqual(x.value, 14)\r
-        self.assertAlmostEqual(y.value, 2.0/3.0)\r
-        self.assertEqual(foo.x, 6)\r
-        self.assertAlmostEqual(foo.y, 4.0)\r
-        for i in range(10):\r
-            self.assertAlmostEqual(arr[i], i*2)\r
-        self.assertEqual(string.value, latin('hellohello'))\r
-\r
-    def test_synchronize(self):\r
-        self.test_sharedctypes(lock=True)\r
-\r
-    def test_copy(self):\r
-        if c_int is None:\r
-            return\r
-\r
-        foo = _Foo(2, 5.0)\r
-        bar = copy(foo)\r
-        foo.x = 0\r
-        foo.y = 0\r
-        self.assertEqual(bar.x, 2)\r
-        self.assertAlmostEqual(bar.y, 5.0)\r
-\r
-#\r
-#\r
-#\r
-\r
-class _TestFinalize(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('processes',)\r
-\r
-    def _test_finalize(self, conn):\r
-        class Foo(object):\r
-            pass\r
-\r
-        a = Foo()\r
-        util.Finalize(a, conn.send, args=('a',))\r
-        del a           # triggers callback for a\r
-\r
-        b = Foo()\r
-        close_b = util.Finalize(b, conn.send, args=('b',))    \r
-        close_b()       # triggers callback for b\r
-        close_b()       # does nothing because callback has already been called\r
-        del b           # does nothing because callback has already been called\r
-\r
-        c = Foo()\r
-        util.Finalize(c, conn.send, args=('c',))\r
-\r
-        d10 = Foo()\r
-        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)\r
-\r
-        d01 = Foo()\r
-        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)\r
-        d02 = Foo()\r
-        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)\r
-        d03 = Foo()\r
-        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)\r
-\r
-        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)\r
-\r
-        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)\r
-\r
-        # call mutliprocessing's cleanup function then exit process without\r
-        # garbage collecting locals\r
-        util._exit_function()\r
-        conn.close()\r
-        os._exit(0)\r
-\r
-    def test_finalize(self):\r
-        conn, child_conn = self.Pipe()\r
-        \r
-        p = self.Process(target=self._test_finalize, args=(child_conn,))\r
-        p.start()\r
-        p.join()\r
-\r
-        result = [obj for obj in iter(conn.recv, 'STOP')]\r
-        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])\r
-\r
-#\r
-# Test that from ... import * works for each module\r
-#\r
-\r
-class _TestImportStar(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('processes',)\r
-\r
-    def test_import(self):\r
-        modules = (\r
-            'multiprocessing', 'multiprocessing.connection',\r
-            'multiprocessing.heap', 'multiprocessing.managers',\r
-            'multiprocessing.pool', 'multiprocessing.process',\r
-            'multiprocessing.reduction', 'multiprocessing.sharedctypes',\r
-            'multiprocessing.synchronize', 'multiprocessing.util'\r
-            )\r
-        \r
-        for name in modules:\r
-            __import__(name)\r
-            mod = sys.modules[name]\r
-            \r
-            for attr in getattr(mod, '__all__', ()):\r
-                self.assertTrue(\r
-                    hasattr(mod, attr),\r
-                    '%r does not have attribute %r' % (mod, attr)\r
-                    )\r
-\r
-#\r
-# Quick test that logging works -- does not test logging output\r
-#\r
-\r
-class _TestLogging(BaseTestCase):\r
-\r
-    ALLOWED_TYPES = ('processes',)\r
-\r
-    def test_enable_logging(self):\r
-        logger = multiprocessing.get_logger()\r
-        logger.setLevel(util.SUBWARNING)\r
-        self.assertTrue(logger is not None)\r
-        logger.debug('this will not be printed')\r
-        logger.info('nor will this')\r
-        logger.setLevel(LOG_LEVEL)\r
-\r
-    def _test_level(self, conn):\r
-        logger = multiprocessing.get_logger()\r
-        conn.send(logger.getEffectiveLevel())\r
-\r
-    def test_level(self):\r
-        LEVEL1 = 32\r
-        LEVEL2 = 37\r
-        \r
-        logger = multiprocessing.get_logger()\r
-        root_logger = logging.getLogger()\r
-        root_level = root_logger.level\r
-\r
-        reader, writer = multiprocessing.Pipe(duplex=False)\r
-\r
-        logger.setLevel(LEVEL1)\r
-        self.Process(target=self._test_level, args=(writer,)).start()\r
-        self.assertEqual(LEVEL1, reader.recv())\r
-\r
-        logger.setLevel(logging.NOTSET)\r
-        root_logger.setLevel(LEVEL2)\r
-        self.Process(target=self._test_level, args=(writer,)).start()\r
-        self.assertEqual(LEVEL2, reader.recv())\r
-\r
-        root_logger.setLevel(root_level)\r
-        logger.setLevel(level=LOG_LEVEL)\r
-\r
-#\r
-# Functions used to create test cases from the base ones in this module\r
-#\r
-\r
-def get_attributes(Source, names):\r
-    d = {}\r
-    for name in names:\r
-        obj = getattr(Source, name)\r
-        if type(obj) == type(get_attributes):\r
-            obj = staticmethod(obj)\r
-        d[name] = obj\r
-    return d\r
-\r
-def create_test_cases(Mixin, type):\r
-    result = {}\r
-    glob = globals()\r
-    Type = type[0].upper() + type[1:]\r
-\r
-    for name in glob.keys():\r
-        if name.startswith('_Test'):\r
-            base = glob[name]\r
-            if type in base.ALLOWED_TYPES:\r
-                newname = 'With' + Type + name[1:]\r
-                class Temp(base, unittest.TestCase, Mixin):\r
-                    pass\r
-                result[newname] = Temp\r
-                Temp.__name__ = newname\r
-                Temp.__module__ = Mixin.__module__\r
-    return result\r
-\r
-#\r
-# Create test cases\r
-#\r
-\r
-class ProcessesMixin(object):\r
-    TYPE = 'processes'\r
-    Process = multiprocessing.Process\r
-    locals().update(get_attributes(multiprocessing, (\r
-        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',\r
-        'Condition', 'Event', 'Value', 'Array', 'RawValue',\r
-        'RawArray', 'current_process', 'active_children', 'Pipe',\r
-        'connection', 'JoinableQueue'\r
-        )))\r
-\r
-testcases_processes = create_test_cases(ProcessesMixin, type='processes')\r
-globals().update(testcases_processes)\r
-\r
-\r
-class ManagerMixin(object):\r
-    TYPE = 'manager'\r
-    Process = multiprocessing.Process\r
-    manager = object.__new__(multiprocessing.managers.SyncManager)\r
-    locals().update(get_attributes(manager, (\r
-        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', \r
-       'Condition', 'Event', 'Value', 'Array', 'list', 'dict',\r
-        'Namespace', 'JoinableQueue'\r
-        )))\r
-\r
-testcases_manager = create_test_cases(ManagerMixin, type='manager')\r
-globals().update(testcases_manager)\r
-\r
-\r
-class ThreadsMixin(object):\r
-    TYPE = 'threads'\r
-    Process = multiprocessing.dummy.Process\r
-    locals().update(get_attributes(multiprocessing.dummy, (\r
-        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',\r
-        'Condition', 'Event', 'Value', 'Array', 'current_process',\r
-        'active_children', 'Pipe', 'connection', 'dict', 'list',\r
-        'Namespace', 'JoinableQueue'\r
-        )))\r
-\r
-testcases_threads = create_test_cases(ThreadsMixin, type='threads')\r
-globals().update(testcases_threads)\r
-\r
-#\r
-#\r
-#\r
-\r
-def test_main(run=None):\r
-    if run is None:\r
-        from test.test_support import run_unittest as run\r
-\r
-    util.get_temp_dir()     # creates temp directory for use by all processes\r
-    \r
-    multiprocessing.get_logger().setLevel(LOG_LEVEL)\r
-\r
-    ProcessesMixin.pool = multiprocessing.Pool(4)\r
-    ThreadsMixin.pool = multiprocessing.dummy.Pool(4)\r
-    ManagerMixin.manager.__init__()\r
-    ManagerMixin.manager.start()\r
-    ManagerMixin.pool = ManagerMixin.manager.Pool(4)\r
-\r
-    testcases = (\r
-        sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +\r
-        sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +\r
-        sorted(testcases_manager.values(), key=lambda tc:tc.__name__)\r
-        )\r
-\r
-    loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase\r
-    suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)\r
-    run(suite)\r
-\r
-    ThreadsMixin.pool.terminate()\r
-    ProcessesMixin.pool.terminate()\r
-    ManagerMixin.pool.terminate()\r
-    ManagerMixin.manager.shutdown()\r
-    \r
-    del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool\r
-\r
-def main():\r
-    test_main(unittest.TextTestRunner(verbosity=2).run)\r
-\r
-if __name__ == '__main__':\r
-    main()\r
+#
+# Unit tests for the multiprocessing package
+#
+
+import unittest
+import threading
+import Queue
+import time
+import sys
+import os
+import gc
+import signal
+import array
+import copy
+import socket
+import random
+import logging
+
+import multiprocessing.dummy
+import multiprocessing.connection
+import multiprocessing.managers
+import multiprocessing.heap
+import multiprocessing.managers
+import multiprocessing.pool
+import _multiprocessing
+
+from multiprocessing import util
+
+#
+#
+#
+
+if sys.version_info >= (3, 0):
+    def latin(s):
+        return s.encode('latin')
+else:
+    latin = str
+
+try:
+    bytes
+except NameError:
+    bytes = str
+    def bytearray(seq):
+        return array.array('c', seq)
+
+#
+# Constants
+#
+
+LOG_LEVEL = util.SUBWARNING
+#LOG_LEVEL = logging.WARNING
+
+DELTA = 0.1
+CHECK_TIMINGS = False     # making true makes tests take a lot longer
+                          # and can sometimes cause some non-serious
+                          # failures because some calls block a bit
+                          # longer than expected
+if CHECK_TIMINGS:
+    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
+else:
+    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
+
+HAVE_GETVALUE = not getattr(_multiprocessing,
+                            'HAVE_BROKEN_SEM_GETVALUE', False)
+
+#
+# Creates a wrapper for a function which records the time it takes to finish
+#
+
+class TimingWrapper(object):
+
+    def __init__(self, func):
+        self.func = func
+        self.elapsed = None
+
+    def __call__(self, *args, **kwds):
+        t = time.time()
+        try:
+            return self.func(*args, **kwds)
+        finally:
+            self.elapsed = time.time() - t
+
+#
+# Base class for test cases
+#
+
+class BaseTestCase(object):
+
+    ALLOWED_TYPES = ('processes', 'manager', 'threads')
+
+    def assertTimingAlmostEqual(self, a, b):
+        if CHECK_TIMINGS:
+            self.assertAlmostEqual(a, b, 1)
+
+    def assertReturnsIfImplemented(self, value, func, *args):
+        try:
+            res = func(*args)
+        except NotImplementedError:
+            pass
+        else:
+            return self.assertEqual(value, res)
+
+#
+# Return the value of a semaphore
+#
+
+def get_value(self):
+    try:
+        return self.get_value()
+    except AttributeError:
+        try:
+            return self._Semaphore__value
+        except AttributeError:
+            try:
+                return self._value
+            except AttributeError:
+                raise NotImplementedError
+
+#
+# Testcases
+#
+
+class _TestProcess(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes', 'threads')
+
+    def test_current(self):
+        if self.TYPE == 'threads':
+            return
+
+        current = self.current_process()
+        authkey = current.get_authkey()
+
+        self.assertTrue(current.is_alive())
+        self.assertTrue(not current.is_daemon())
+        self.assertTrue(isinstance(authkey, bytes))
+        self.assertTrue(len(authkey) > 0)
+        self.assertEqual(current.get_ident(), os.getpid())
+        self.assertEqual(current.get_exitcode(), None)
+
+    def _test(self, q, *args, **kwds):
+        current = self.current_process()
+        q.put(args)
+        q.put(kwds)
+        q.put(current.get_name())
+        if self.TYPE != 'threads':
+            q.put(bytes(current.get_authkey()))
+            q.put(current.pid)
+
+    def test_process(self):
+        q = self.Queue(1)
+        e = self.Event()
+        args = (q, 1, 2)
+        kwargs = {'hello':23, 'bye':2.54}
+        name = 'SomeProcess'
+        p = self.Process(
+            target=self._test, args=args, kwargs=kwargs, name=name
+            )
+        p.set_daemon(True)
+        current = self.current_process()
+
+        if self.TYPE != 'threads':
+            self.assertEquals(p.get_authkey(), current.get_authkey())
+        self.assertEquals(p.is_alive(), False)
+        self.assertEquals(p.is_daemon(), True)
+        self.assertTrue(p not in self.active_children())
+        self.assertTrue(type(self.active_children()) is list)
+        self.assertEqual(p.get_exitcode(), None)
+
+        p.start()
+
+        self.assertEquals(p.get_exitcode(), None)
+        self.assertEquals(p.is_alive(), True)
+        self.assertTrue(p in self.active_children())
+
+        self.assertEquals(q.get(), args[1:])
+        self.assertEquals(q.get(), kwargs)
+        self.assertEquals(q.get(), p.get_name())
+        if self.TYPE != 'threads':
+            self.assertEquals(q.get(), current.get_authkey())
+            self.assertEquals(q.get(), p.pid)
+
+        p.join()
+
+        self.assertEquals(p.get_exitcode(), 0)
+        self.assertEquals(p.is_alive(), False)
+        self.assertTrue(p not in self.active_children())
+
+    def _test_terminate(self):
+        time.sleep(1000)
+
+    def test_terminate(self):
+        if self.TYPE == 'threads':
+            return
+
+        p = self.Process(target=self._test_terminate)
+        p.set_daemon(True)
+        p.start()
+
+        self.assertEqual(p.is_alive(), True)
+        self.assertTrue(p in self.active_children())
+        self.assertEqual(p.get_exitcode(), None)
+
+        p.terminate()
+
+        join = TimingWrapper(p.join)
+        self.assertEqual(join(), None)
+        self.assertTimingAlmostEqual(join.elapsed, 0.0)
+
+        self.assertEqual(p.is_alive(), False)
+        self.assertTrue(p not in self.active_children())
+
+        p.join()
+
+        # XXX sometimes get p.get_exitcode() == 0 on Windows ...
+        #self.assertEqual(p.get_exitcode(), -signal.SIGTERM)
+
+    def test_cpu_count(self):
+        try:
+            cpus = multiprocessing.cpu_count()
+        except NotImplementedError:
+            cpus = 1
+        self.assertTrue(type(cpus) is int)
+        self.assertTrue(cpus >= 1)
+
+    def test_active_children(self):
+        self.assertEqual(type(self.active_children()), list)
+
+        p = self.Process(target=time.sleep, args=(DELTA,))
+        self.assertTrue(p not in self.active_children())
+
+        p.start()
+        self.assertTrue(p in self.active_children())
+
+        p.join()
+        self.assertTrue(p not in self.active_children())
+
+    def _test_recursion(self, wconn, id):
+        from multiprocessing import forking
+        wconn.send(id)
+        if len(id) < 2:
+            for i in range(2):
+                p = self.Process(
+                    target=self._test_recursion, args=(wconn, id+[i])
+                    )
+                p.start()
+                p.join()
+
+    def test_recursion(self):
+        rconn, wconn = self.Pipe(duplex=False)
+        self._test_recursion(wconn, [])
+
+        time.sleep(DELTA)
+        result = []
+        while rconn.poll():
+            result.append(rconn.recv())
+
+        expected = [
+            [],
+              [0],
+                [0, 0],
+                [0, 1],
+              [1],
+                [1, 0],
+                [1, 1]
+            ]
+        self.assertEqual(result, expected)
+
+#
+#
+#
+
+class _UpperCaser(multiprocessing.Process):
+
+    def __init__(self):
+        multiprocessing.Process.__init__(self)
+        self.child_conn, self.parent_conn = multiprocessing.Pipe()
+
+    def run(self):
+        self.parent_conn.close()
+        for s in iter(self.child_conn.recv, None):
+            self.child_conn.send(s.upper())
+        self.child_conn.close()
+
+    def submit(self, s):
+        assert type(s) is str
+        self.parent_conn.send(s)
+        return self.parent_conn.recv()
+
+    def stop(self):
+        self.parent_conn.send(None)
+        self.parent_conn.close()
+        self.child_conn.close()
+
+class _TestSubclassingProcess(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_subclassing(self):
+        uppercaser = _UpperCaser()
+        uppercaser.start()
+        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
+        self.assertEqual(uppercaser.submit('world'), 'WORLD')
+        uppercaser.stop()
+        uppercaser.join()
+
+#
+#
+#
+
+def queue_empty(q):
+    if hasattr(q, 'empty'):
+        return q.empty()
+    else:
+        return q.qsize() == 0
+
+def queue_full(q, maxsize):
+    if hasattr(q, 'full'):
+        return q.full()
+    else:
+        return q.qsize() == maxsize
+
+
+class _TestQueue(BaseTestCase):
+
+
+    def _test_put(self, queue, child_can_start, parent_can_continue):
+        child_can_start.wait()
+        for i in range(6):
+            queue.get()
+        parent_can_continue.set()
+
+    def test_put(self):
+        MAXSIZE = 6
+        queue = self.Queue(maxsize=MAXSIZE)
+        child_can_start = self.Event()
+        parent_can_continue = self.Event()
+
+        proc = self.Process(
+            target=self._test_put,
+            args=(queue, child_can_start, parent_can_continue)
+            )
+        proc.set_daemon(True)
+        proc.start()
+
+        self.assertEqual(queue_empty(queue), True)
+        self.assertEqual(queue_full(queue, MAXSIZE), False)
+
+        queue.put(1)
+        queue.put(2, True)
+        queue.put(3, True, None)
+        queue.put(4, False)
+        queue.put(5, False, None)
+        queue.put_nowait(6)
+
+        # the values may be in buffer but not yet in pipe so sleep a bit
+        time.sleep(DELTA)
+
+        self.assertEqual(queue_empty(queue), False)
+        self.assertEqual(queue_full(queue, MAXSIZE), True)
+
+        put = TimingWrapper(queue.put)
+        put_nowait = TimingWrapper(queue.put_nowait)
+
+        self.assertRaises(Queue.Full, put, 7, False)
+        self.assertTimingAlmostEqual(put.elapsed, 0)
+
+        self.assertRaises(Queue.Full, put, 7, False, None)
+        self.assertTimingAlmostEqual(put.elapsed, 0)
+
+        self.assertRaises(Queue.Full, put_nowait, 7)
+        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
+
+        self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
+        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
+
+        self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
+        self.assertTimingAlmostEqual(put.elapsed, 0)
+
+        self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
+        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
+
+        child_can_start.set()
+        parent_can_continue.wait()
+
+        self.assertEqual(queue_empty(queue), True)
+        self.assertEqual(queue_full(queue, MAXSIZE), False)
+
+        proc.join()
+
+    def _test_get(self, queue, child_can_start, parent_can_continue):
+        child_can_start.wait()
+        queue.put(1)
+        queue.put(2)
+        queue.put(3)
+        queue.put(4)
+        queue.put(5)
+        parent_can_continue.set()
+
+    def test_get(self):
+        # Exercise every get() calling convention (blocking, timeout,
+        # get_nowait) plus the Queue.Empty timeout behaviour.
+        queue = self.Queue()
+        child_can_start = self.Event()
+        parent_can_continue = self.Event()
+
+        proc = self.Process(
+            target=self._test_get,
+            args=(queue, child_can_start, parent_can_continue)
+            )
+        proc.set_daemon(True)
+        proc.start()
+
+        self.assertEqual(queue_empty(queue), True)
+
+        child_can_start.set()
+        parent_can_continue.wait()
+
+        # give the feeder thread time to push the items through the pipe
+        time.sleep(DELTA)
+        self.assertEqual(queue_empty(queue), False)
+
+        self.assertEqual(queue.get(), 1)
+        self.assertEqual(queue.get(True, None), 2)
+        self.assertEqual(queue.get(True), 3)
+        self.assertEqual(queue.get(timeout=1), 4)
+        self.assertEqual(queue.get_nowait(), 5)
+
+        self.assertEqual(queue_empty(queue), True)
+
+        get = TimingWrapper(queue.get)
+        get_nowait = TimingWrapper(queue.get_nowait)
+
+        # non-blocking get returns immediately, with or without a timeout arg
+        self.assertRaises(Queue.Empty, get, False)
+        self.assertTimingAlmostEqual(get.elapsed, 0)
+
+        self.assertRaises(Queue.Empty, get, False, None)
+        self.assertTimingAlmostEqual(get.elapsed, 0)
+
+        self.assertRaises(Queue.Empty, get_nowait)
+        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
+
+        # blocking get honours its timeout before raising Queue.Empty
+        self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
+        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
+
+        # block=False ignores the timeout argument entirely
+        self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
+        self.assertTimingAlmostEqual(get.elapsed, 0)
+
+        self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
+        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
+
+        proc.join()
+    def _test_fork(self, queue):
+        # Child-process helper for test_fork: append ten more items
+        # after the fork.
+        for i in range(10, 20):
+            queue.put(i)
+        # note that at this point the items may only be buffered, so the
+        # process cannot shutdown until the feeder thread has finished
+        # pushing items onto the pipe.
+    def test_fork(self):
+        # Old versions of Queue would fail to create a new feeder
+        # thread for a forked process if the original process had its
+        # own feeder thread.  This test checks that this no longer
+        # happens.
+
+        queue = self.Queue()
+
+        # put items on queue so that main process starts a feeder thread
+        for i in range(10):
+            queue.put(i)
+
+        # wait to make sure thread starts before we fork a new process
+        time.sleep(DELTA)
+
+        # fork process
+        p = self.Process(target=self._test_fork, args=(queue,))
+        p.start()
+
+        # check that all expected items are in the queue
+        # (parent's 0-9 were buffered first, so FIFO order holds here)
+        for i in range(20):
+            self.assertEqual(queue.get(), i)
+        self.assertRaises(Queue.Empty, queue.get, False)
+
+        p.join()
+    def test_qsize(self):
+        # qsize() should track puts and gets; on platforms without
+        # sem_getvalue() it raises NotImplementedError, so bail out early.
+        q = self.Queue()
+        try:
+            self.assertEqual(q.qsize(), 0)
+        except NotImplementedError:
+            return
+        q.put(1)
+        self.assertEqual(q.qsize(), 1)
+        q.put(5)
+        self.assertEqual(q.qsize(), 2)
+        q.get()
+        self.assertEqual(q.qsize(), 1)
+        q.get()
+        self.assertEqual(q.qsize(), 0)
+    def _test_task_done(self, q):
+        # Worker loop: consume items until the None sentinel, calling
+        # task_done() for each one so that q.join() can unblock.
+        for obj in iter(q.get, None):
+            time.sleep(DELTA)
+            q.task_done()
+    def test_task_done(self):
+        # JoinableQueue: join() must block until task_done() has been
+        # called once per item put on the queue.
+        queue = self.JoinableQueue()
+
+        # task_done/join only exist from Py2.5's Queue API onwards
+        if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
+            return
+
+        workers = [self.Process(target=self._test_task_done, args=(queue,))
+                   for i in xrange(4)]
+
+        for p in workers:
+            p.start()
+
+        for i in xrange(10):
+            queue.put(i)
+
+        queue.join()
+
+        # one None sentinel per worker shuts the consumer loops down
+        for p in workers:
+            queue.put(None)
+
+        for p in workers:
+            p.join()
+#
+#
+#
+
+class _TestLock(BaseTestCase):
+    """Basic acquire/release semantics of Lock and RLock."""
+
+    def test_lock(self):
+        # a plain Lock is non-reentrant: second non-blocking acquire fails
+        lock = self.Lock()
+        self.assertEqual(lock.acquire(), True)
+        self.assertEqual(lock.acquire(False), False)
+        self.assertEqual(lock.release(), None)
+        # releasing an unheld lock raises (exception type varies by impl)
+        self.assertRaises((ValueError, threading.ThreadError), lock.release)
+
+    def test_rlock(self):
+        # an RLock may be acquired repeatedly by the owner and must be
+        # released the same number of times
+        lock = self.RLock()
+        self.assertEqual(lock.acquire(), True)
+        self.assertEqual(lock.acquire(), True)
+        self.assertEqual(lock.acquire(), True)
+        self.assertEqual(lock.release(), None)
+        self.assertEqual(lock.release(), None)
+        self.assertEqual(lock.release(), None)
+        self.assertRaises((AssertionError, RuntimeError), lock.release)
+
+class _TestSemaphore(BaseTestCase):
+    """Counting behaviour and timeouts of Semaphore/BoundedSemaphore."""
+
+    def _test_semaphore(self, sem):
+        # drive a semaphore with initial value 2 down to 0 and back up,
+        # checking the counter via get_value() where implemented
+        self.assertReturnsIfImplemented(2, get_value, sem)
+        self.assertEqual(sem.acquire(), True)
+        self.assertReturnsIfImplemented(1, get_value, sem)
+        self.assertEqual(sem.acquire(), True)
+        self.assertReturnsIfImplemented(0, get_value, sem)
+        self.assertEqual(sem.acquire(False), False)
+        self.assertReturnsIfImplemented(0, get_value, sem)
+        self.assertEqual(sem.release(), None)
+        self.assertReturnsIfImplemented(1, get_value, sem)
+        self.assertEqual(sem.release(), None)
+        self.assertReturnsIfImplemented(2, get_value, sem)
+
+    def test_semaphore(self):
+        # a plain Semaphore may be released above its initial value
+        sem = self.Semaphore(2)
+        self._test_semaphore(sem)
+        self.assertEqual(sem.release(), None)
+        self.assertReturnsIfImplemented(3, get_value, sem)
+        self.assertEqual(sem.release(), None)
+        self.assertReturnsIfImplemented(4, get_value, sem)
+
+    def test_bounded_semaphore(self):
+        sem = self.BoundedSemaphore(2)
+        self._test_semaphore(sem)
+        # Currently fails on OS/X
+        #if HAVE_GETVALUE:
+        #    self.assertRaises(ValueError, sem.release)
+        #    self.assertReturnsIfImplemented(2, get_value, sem)
+
+    def test_timeout(self):
+        # timeout arguments are only supported by the process flavour
+        if self.TYPE != 'processes':
+            return
+
+        sem = self.Semaphore(0)
+        acquire = TimingWrapper(sem.acquire)
+
+        self.assertEqual(acquire(False), False)
+        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
+
+        self.assertEqual(acquire(False, None), False)
+        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
+
+        # block=False ignores the timeout argument
+        self.assertEqual(acquire(False, TIMEOUT1), False)
+        self.assertTimingAlmostEqual(acquire.elapsed, 0)
+
+        self.assertEqual(acquire(True, TIMEOUT2), False)
+        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
+
+        self.assertEqual(acquire(timeout=TIMEOUT3), False)
+        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
+
+class _TestCondition(BaseTestCase):
+    """notify/notify_all/wait-timeout behaviour of Condition variables."""
+
+    def f(self, cond, sleeping, woken, timeout=None):
+        # Helper run in a child thread/process: announce we are about to
+        # sleep on the condition, wait (possibly with timeout), then
+        # announce that we woke up.
+        cond.acquire()
+        sleeping.release()
+        cond.wait(timeout)
+        woken.release()
+        cond.release()
+
+    def check_invariant(self, cond):
+        # this is only supposed to succeed when there are no sleepers
+        if self.TYPE == 'processes':
+            try:
+                sleepers = (cond._sleeping_count.get_value() -
+                            cond._woken_count.get_value())
+                self.assertEqual(sleepers, 0)
+                self.assertEqual(cond._wait_semaphore.get_value(), 0)
+            except NotImplementedError:
+                pass
+
+    def test_notify(self):
+        # one process and one thread sleep on the condition; each
+        # notify() should wake exactly one of them
+        cond = self.Condition()
+        sleeping = self.Semaphore(0)
+        woken = self.Semaphore(0)
+
+        p = self.Process(target=self.f, args=(cond, sleeping, woken))
+        p.set_daemon(True)
+        p.start()
+
+        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
+        p.set_daemon(True)
+        p.start()
+
+        # wait for both children to start sleeping
+        sleeping.acquire()
+        sleeping.acquire()
+
+        # check no process/thread has woken up
+        time.sleep(DELTA)
+        self.assertReturnsIfImplemented(0, get_value, woken)
+
+        # wake up one process/thread
+        cond.acquire()
+        cond.notify()
+        cond.release()
+
+        # check one process/thread has woken up
+        time.sleep(DELTA)
+        self.assertReturnsIfImplemented(1, get_value, woken)
+
+        # wake up another
+        cond.acquire()
+        cond.notify()
+        cond.release()
+
+        # check other has woken up
+        time.sleep(DELTA)
+        self.assertReturnsIfImplemented(2, get_value, woken)
+
+        # check state is not mucked up
+        self.check_invariant(cond)
+        p.join()
+
+    def test_notify_all(self):
+        cond = self.Condition()
+        sleeping = self.Semaphore(0)
+        woken = self.Semaphore(0)
+
+        # start some threads/processes which will timeout
+        for i in range(3):
+            p = self.Process(target=self.f,
+                             args=(cond, sleeping, woken, TIMEOUT1))
+            p.set_daemon(True)
+            p.start()
+
+            t = threading.Thread(target=self.f,
+                                 args=(cond, sleeping, woken, TIMEOUT1))
+            t.set_daemon(True)
+            t.start()
+
+        # wait for them all to sleep
+        for i in xrange(6):
+            sleeping.acquire()
+
+        # check they have all timed out
+        for i in xrange(6):
+            woken.acquire()
+        self.assertReturnsIfImplemented(0, get_value, woken)
+
+        # check state is not mucked up
+        self.check_invariant(cond)
+
+        # start some more threads/processes
+        for i in range(3):
+            p = self.Process(target=self.f, args=(cond, sleeping, woken))
+            p.set_daemon(True)
+            p.start()
+
+            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
+            t.set_daemon(True)
+            t.start()
+
+        # wait for them to all sleep
+        for i in xrange(6):
+            sleeping.acquire()
+
+        # check no process/thread has woken up
+        time.sleep(DELTA)
+        self.assertReturnsIfImplemented(0, get_value, woken)
+
+        # wake them all up
+        cond.acquire()
+        cond.notify_all()
+        cond.release()
+
+        # check they have all woken
+        time.sleep(DELTA)
+        self.assertReturnsIfImplemented(6, get_value, woken)
+
+        # check state is not mucked up
+        self.check_invariant(cond)
+
+    def test_timeout(self):
+        # wait(timeout) should return after roughly TIMEOUT1 seconds
+        cond = self.Condition()
+        wait = TimingWrapper(cond.wait)
+        cond.acquire()
+        res = wait(TIMEOUT1)
+        cond.release()
+        self.assertEqual(res, None)
+        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
+
+class _TestEvent(BaseTestCase):
+    """Event set/clear/wait semantics, including wait timeouts."""
+
+    def _test_event(self, event):
+        # Child-side helper: set the event after a delay so the parent's
+        # final wait() has something to wake on.
+        time.sleep(TIMEOUT2)
+        event.set()
+
+    def test_event(self):
+        event = self.Event()
+        wait = TimingWrapper(event.wait)
+
+        # Removed temporarily: due to API shear this does not work
+        # with threading._Event objects (is_set == isSet).
+        #self.assertEqual(event.is_set(), False)
+
+        # waiting on an unset event returns after the timeout elapses
+        self.assertEqual(wait(0.0), None)
+        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
+        self.assertEqual(wait(TIMEOUT1), None)
+        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
+
+        event.set()
+
+        # See note above on the API differences
+        # self.assertEqual(event.is_set(), True)
+        # waiting on a set event returns immediately
+        self.assertEqual(wait(), None)
+        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
+        self.assertEqual(wait(TIMEOUT1), None)
+        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
+        # self.assertEqual(event.is_set(), True)
+
+        event.clear()
+
+        #self.assertEqual(event.is_set(), False)
+
+        # NOTE(review): the child process is neither daemonized nor
+        # joined here -- confirm that is intentional.
+        self.Process(target=self._test_event, args=(event,)).start()
+        self.assertEqual(wait(), None)
+
+#
+#
+#
+
+class _TestValue(BaseTestCase):
+    """Shared ctypes Value/RawValue: cross-process writes and lock access."""
+
+    # (typecode, initial value, value written by the child process)
+    codes_values = [
+        ('i', 4343, 24234),
+        ('d', 3.625, -4.25),
+        ('h', -232, 234),
+        ('c', latin('x'), latin('y'))
+        ]
+
+    def _test(self, values):
+        # Child-side helper: overwrite each shared value with cv[2]
+        for sv, cv in zip(values, self.codes_values):
+            sv.value = cv[2]
+
+
+    def test_value(self, raw=False):
+        # shared ctypes only exist for the process flavour
+        if self.TYPE != 'processes':
+            return
+
+        if raw:
+            values = [self.RawValue(code, value)
+                      for code, value, _ in self.codes_values]
+        else:
+            values = [self.Value(code, value)
+                      for code, value, _ in self.codes_values]
+
+        for sv, cv in zip(values, self.codes_values):
+            self.assertEqual(sv.value, cv[1])
+
+        # the child's writes must be visible here after it exits
+        proc = self.Process(target=self._test, args=(values,))
+        proc.start()
+        proc.join()
+
+        for sv, cv in zip(values, self.codes_values):
+            self.assertEqual(sv.value, cv[2])
+
+    def test_rawvalue(self):
+        self.test_value(raw=True)
+
+    def test_getobj_getlock(self):
+        if self.TYPE != 'processes':
+            return
+
+        val1 = self.Value('i', 5)
+        lock1 = val1.get_lock()
+        obj1 = val1.get_obj()
+
+        # lock=None means "create a fresh lock", same as the default
+        val2 = self.Value('i', 5, lock=None)
+        lock2 = val2.get_lock()
+        obj2 = val2.get_obj()
+
+        # an explicitly supplied lock must be the one returned
+        lock = self.Lock()
+        val3 = self.Value('i', 5, lock=lock)
+        lock3 = val3.get_lock()
+        obj3 = val3.get_obj()
+        self.assertEqual(lock, lock3)
+
+        # RawValue has no synchronisation wrapper at all
+        arr4 = self.RawValue('i', 5)
+        self.assertFalse(hasattr(arr4, 'get_lock'))
+        self.assertFalse(hasattr(arr4, 'get_obj'))
+
+class _TestArray(BaseTestCase):
+    """Shared ctypes Array/RawArray: slicing, cross-process mutation, locks."""
+
+    def f(self, seq):
+        # in-place prefix sum; run both locally and in a child process
+        for i in range(1, len(seq)):
+            seq[i] += seq[i-1]
+
+    def test_array(self, raw=False):
+        # shared ctypes only exist for the process flavour
+        if self.TYPE != 'processes':
+            return
+
+        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
+        if raw:
+            arr = self.RawArray('i', seq)
+        else:
+            arr = self.Array('i', seq)
+
+        self.assertEqual(len(arr), len(seq))
+        self.assertEqual(arr[3], seq[3])
+        self.assertEqual(list(arr[2:7]), list(seq[2:7]))
+
+        # slice assignment accepts an array.array as well as a list
+        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
+
+        self.assertEqual(list(arr[:]), seq)
+
+        # apply the same transform to the plain list locally and to the
+        # shared array in a child; the results must match
+        self.f(seq)
+
+        p = self.Process(target=self.f, args=(arr,))
+        p.start()
+        p.join()
+
+        self.assertEqual(list(arr[:]), seq)
+
+    def test_rawarray(self):
+        self.test_array(raw=True)
+
+    def test_getobj_getlock_obj(self):
+        if self.TYPE != 'processes':
+            return
+
+        arr1 = self.Array('i', range(10))
+        lock1 = arr1.get_lock()
+        obj1 = arr1.get_obj()
+
+        # lock=None means "create a fresh lock", same as the default
+        arr2 = self.Array('i', range(10), lock=None)
+        lock2 = arr2.get_lock()
+        obj2 = arr2.get_obj()
+
+        # an explicitly supplied lock must be the one returned
+        lock = self.Lock()
+        arr3 = self.Array('i', range(10), lock=lock)
+        lock3 = arr3.get_lock()
+        obj3 = arr3.get_obj()
+        self.assertEqual(lock, lock3)
+
+        # RawArray has no synchronisation wrapper at all
+        arr4 = self.RawArray('i', range(10))
+        self.assertFalse(hasattr(arr4, 'get_lock'))
+        self.assertFalse(hasattr(arr4, 'get_obj'))
+
+#
+#
+#
+
+class _TestContainers(BaseTestCase):
+    """Manager-proxied list, dict and Namespace behave like the real types."""
+
+    ALLOWED_TYPES = ('manager',)
+
+    def test_list(self):
+        a = self.list(range(10))
+        self.assertEqual(a[:], range(10))
+
+        b = self.list()
+        self.assertEqual(b[:], [])
+
+        b.extend(range(5))
+        self.assertEqual(b[:], range(5))
+
+        self.assertEqual(b[2], 2)
+        self.assertEqual(b[2:10], [2,3,4])
+
+        # in-place repeat goes through the proxy too
+        b *= 2
+        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
+
+        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
+
+        self.assertEqual(a[:], range(10))
+
+        # nesting proxies inside a proxied list copies their contents
+        d = [a, b]
+        e = self.list(d)
+        self.assertEqual(
+            e[:],
+            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
+            )
+
+        f = self.list([a])
+        a.append('hello')
+        self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
+
+    def test_dict(self):
+        d = self.dict()
+        indices = range(65, 70)
+        for i in indices:
+            d[i] = chr(i)
+        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
+        self.assertEqual(sorted(d.keys()), indices)
+        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
+        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
+
+    def test_namespace(self):
+        n = self.Namespace()
+        n.name = 'Bob'
+        n.job = 'Builder'
+        n._hidden = 'hidden'
+        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
+        del n.job
+        # underscore-prefixed attributes are hidden from repr()
+        self.assertEqual(str(n), "Namespace(name='Bob')")
+        self.assertTrue(hasattr(n, 'name'))
+        self.assertTrue(not hasattr(n, 'job'))
+
+#
+#
+#
+
+def sqr(x, wait=0.0):
+    # Square x after an optional delay; used as the Pool worker function.
+    time.sleep(wait)
+    return x*x
+
+class _TestPool(BaseTestCase):
+    """Pool API: apply/map, async variants, imap, construction, terminate."""
+
+    def test_apply(self):
+        papply = self.pool.apply
+        self.assertEqual(papply(sqr, (5,)), sqr(5))
+        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
+
+    def test_map(self):
+        pmap = self.pool.map
+        self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
+        self.assertEqual(pmap(sqr, range(100), chunksize=20),
+                         map(sqr, range(100)))
+
+    def test_async(self):
+        # the result is only ready after the worker's TIMEOUT1 sleep
+        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
+        get = TimingWrapper(res.get)
+        self.assertEqual(get(), 49)
+        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
+
+    def test_async_timeout(self):
+        # worker sleeps slightly longer than the get() timeout
+        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
+        get = TimingWrapper(res.get)
+        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
+        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
+
+    def test_imap(self):
+        it = self.pool.imap(sqr, range(10))
+        self.assertEqual(list(it), map(sqr, range(10)))
+
+        # imap preserves input order regardless of worker scheduling
+        it = self.pool.imap(sqr, range(10))
+        for i in range(10):
+            self.assertEqual(it.next(), i*i)
+        self.assertRaises(StopIteration, it.next)
+
+        it = self.pool.imap(sqr, range(1000), chunksize=100)
+        for i in range(1000):
+            self.assertEqual(it.next(), i*i)
+        self.assertRaises(StopIteration, it.next)
+
+    def test_imap_unordered(self):
+        # order is unspecified, so compare as sorted sequences
+        it = self.pool.imap_unordered(sqr, range(1000))
+        self.assertEqual(sorted(it), map(sqr, range(1000)))
+
+        it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
+        self.assertEqual(sorted(it), map(sqr, range(1000)))
+
+    def test_make_pool(self):
+        p = multiprocessing.Pool(3)
+        self.assertEqual(3, len(p._pool))
+        p.close()
+        p.join()
+
+    def test_terminate(self):
+        if self.TYPE == 'manager':
+            # On Unix a forked process increfs each shared object to
+            # which its parent process held a reference.  If the
+            # forked process gets terminated then there is likely to
+            # be a reference leak.  So to prevent
+            # _TestZZZNumberOfObjects from failing we skip this test
+            # when using a manager.
+            return
+
+        # queue far more work than can finish, then terminate() and
+        # check join() returns promptly instead of draining the queue
+        result = self.pool.map_async(
+            time.sleep, [0.1 for i in range(10000)], chunksize=1
+            )
+        self.pool.terminate()
+        join = TimingWrapper(self.pool.join)
+        join()
+        self.assertTrue(join.elapsed < 0.2)
+
+#
+# Test that manager has expected number of shared objects left
+#
+
+class _TestZZZNumberOfObjects(BaseTestCase):
+    # Because test cases are sorted alphabetically, this one will get
+    # run after all the other tests for the manager.  It tests that
+    # there have been no "reference leaks" for the manager's shared
+    # objects.  Note the comment in _TestPool.test_terminate().
+    ALLOWED_TYPES = ('manager',)
+
+    def test_number_of_objects(self):
+        EXPECTED_NUMBER = 1                # the pool object is still alive
+        multiprocessing.active_children()  # discard dead process objs
+        gc.collect()                       # do garbage collection
+        refs = self.manager._number_of_objects()
+        if refs != EXPECTED_NUMBER:
+            # dump the manager's bookkeeping to aid debugging the leak
+            print self.manager._debugInfo()
+
+        self.assertEqual(refs, EXPECTED_NUMBER)
+
+#
+# Test of creating a customized manager class
+#
+
+from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
+
+class FooBar(object):
+    # Fixture class registered with MyManager; f/g are public methods,
+    # _h is "private" (underscore) and so not exposed by default.
+    def f(self):
+        return 'f()'
+    def g(self):
+        raise ValueError
+    def _h(self):
+        return '_h()'
+
+def baz():
+    # Generator fixture: yields the first ten squares; proxied by
+    # IteratorProxy below.
+    for i in xrange(10):
+        yield i*i
+
+class IteratorProxy(BaseProxy):
+    # Custom proxy that forwards the iterator protocol to the referent;
+    # exposes both Py2 'next' and Py3 '__next__' spellings.
+    _exposed_ = ('next', '__next__')
+    def __iter__(self):
+        return self
+    def next(self):
+        return self._callmethod('next')
+    def __next__(self):
+        return self._callmethod('__next__')
+
+class MyManager(BaseManager):
+    # Customized manager exercised by _TestMyManager.
+    pass
+
+# Foo: default exposure (public methods only); Bar: explicit exposure
+# including a private method; baz: generator served via IteratorProxy.
+MyManager.register('Foo', callable=FooBar)
+MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
+MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
+
+
+class _TestMyManager(BaseTestCase):
+    """Customized BaseManager: exposure rules and proxied generators."""
+
+    ALLOWED_TYPES = ('manager',)
+
+    def test_mymanager(self):
+        manager = MyManager()
+        manager.start()
+
+        foo = manager.Foo()
+        bar = manager.Bar()
+        baz = manager.baz()
+
+        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
+        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
+
+        # default exposure hides _h; explicit exposure for Bar hides g
+        self.assertEqual(foo_methods, ['f', 'g'])
+        self.assertEqual(bar_methods, ['f', '_h'])
+
+        self.assertEqual(foo.f(), 'f()')
+        self.assertRaises(ValueError, foo.g)
+        self.assertEqual(foo._callmethod('f'), 'f()')
+        # calling an unexposed method through _callmethod fails remotely
+        self.assertRaises(RemoteError, foo._callmethod, '_h')
+
+        self.assertEqual(bar.f(), 'f()')
+        self.assertEqual(bar._h(), '_h()')
+        self.assertEqual(bar._callmethod('f'), 'f()')
+        self.assertEqual(bar._callmethod('_h'), '_h()')
+
+        # the IteratorProxy lets us iterate the remote generator directly
+        self.assertEqual(list(baz), [i*i for i in range(10)])
+
+        manager.shutdown()
+
+#
+# Test of connecting to a remote server and using xmlrpclib for serialization
+#
+
+# Module-level queue served to remote clients by QueueManager.
+_queue = Queue.Queue()
+def get_queue():
+    return _queue
+
+class QueueManager(BaseManager):
+    '''manager class used by server process'''
+QueueManager.register('get_queue', callable=get_queue)
+
+class QueueManager2(BaseManager):
+    '''manager class which specifies the same interface as QueueManager'''
+# no callable: clients only need the method name to call the server
+QueueManager2.register('get_queue')
+
+
+# serializer used by _TestRemoteManager instead of the default pickle
+SERIALIZER = 'xmlrpclib'
+
+class _TestRemoteManager(BaseTestCase):
+    """Connect to a manager server over TCP using xmlrpclib serialization."""
+
+    ALLOWED_TYPES = ('manager',)
+
+    def _putter(self, address, authkey):
+        # Child process: connect as a client and push one tuple.
+        manager = QueueManager2(
+            address=address, authkey=authkey, serializer=SERIALIZER
+            )
+        manager.connect()
+        queue = manager.get_queue()
+        queue.put(('hello world', None, True, 2.25))
+
+    def test_remote(self):
+        authkey = os.urandom(32)
+
+        # port 0 lets the OS pick a free port
+        manager = QueueManager(
+            address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
+            )
+        manager.start()
+
+        p = self.Process(target=self._putter, args=(manager.address, authkey))
+        p.start()
+
+        manager2 = QueueManager2(
+            address=manager.address, authkey=authkey, serializer=SERIALIZER
+            )
+        manager2.connect()
+        queue = manager2.get_queue()
+
+        # Note that xmlrpclib will deserialize object as a list not a tuple
+        self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
+
+        # Because we are using xmlrpclib for serialization instead of
+        # pickle this will cause a serialization error.
+        self.assertRaises(Exception, queue.put, time.sleep)
+
+        # Make queue finalizer run before the server is stopped
+        del queue
+        manager.shutdown()
+
+#
+#
+#
+
+# empty byte string used to tell the echo child to exit
+SENTINEL = latin('')
+
+class _TestConnection(BaseTestCase):
+    """Pipe/Connection API: send/recv, bytes variants, poll, half-duplex."""
+
+    ALLOWED_TYPES = ('processes', 'threads')
+
+    def _echo(self, conn):
+        # Child-side helper: echo every message back until SENTINEL.
+        for msg in iter(conn.recv_bytes, SENTINEL):
+            conn.send_bytes(msg)
+        conn.close()
+
+    def test_connection(self):
+        conn, child_conn = self.Pipe()
+
+        p = self.Process(target=self._echo, args=(child_conn,))
+        p.set_daemon(True)
+        p.start()
+
+        seq = [1, 2.25, None]
+        msg = latin('hello world')
+        longmsg = msg * 10
+        arr = array.array('i', range(4))
+
+        if self.TYPE == 'processes':
+            self.assertEqual(type(conn.fileno()), int)
+
+        # round-trip a picklable object and a byte string via the echo child
+        self.assertEqual(conn.send(seq), None)
+        self.assertEqual(conn.recv(), seq)
+
+        self.assertEqual(conn.send_bytes(msg), None)
+        self.assertEqual(conn.recv_bytes(), msg)
+
+        if self.TYPE == 'processes':
+            # recv_bytes_into: fill from offset 0, leaving the tail untouched
+            buffer = array.array('i', [0]*10)
+            expected = list(arr) + [0] * (10 - len(arr))
+            self.assertEqual(conn.send_bytes(arr), None)
+            self.assertEqual(conn.recv_bytes_into(buffer),
+                             len(arr) * buffer.itemsize)
+            self.assertEqual(list(buffer), expected)
+
+            # recv_bytes_into with a non-zero byte offset
+            buffer = array.array('i', [0]*10)
+            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
+            self.assertEqual(conn.send_bytes(arr), None)
+            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
+                             len(arr) * buffer.itemsize)
+            self.assertEqual(list(buffer), expected)
+
+            # an undersized buffer raises BufferTooShort carrying the message
+            buffer = bytearray(latin(' ' * 40))
+            self.assertEqual(conn.send_bytes(longmsg), None)
+            try:
+                res = conn.recv_bytes_into(buffer)
+            except multiprocessing.BufferTooShort, e:
+                self.assertEqual(e.args, (longmsg,))
+            else:
+                self.fail('expected BufferTooShort, got %s' % res)
+
+        poll = TimingWrapper(conn.poll)
+
+        self.assertEqual(poll(), False)
+        self.assertTimingAlmostEqual(poll.elapsed, 0)
+
+        self.assertEqual(poll(TIMEOUT1), False)
+        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
+
+        conn.send(None)
+
+        # data is pending, so poll() returns immediately
+        self.assertEqual(poll(TIMEOUT1), True)
+        self.assertTimingAlmostEqual(poll.elapsed, 0)
+
+        self.assertEqual(conn.recv(), None)
+
+        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16Mb
+        conn.send_bytes(really_big_msg)
+        self.assertEqual(conn.recv_bytes(), really_big_msg)
+
+        conn.send_bytes(SENTINEL)                          # tell child to quit
+        child_conn.close()
+
+        if self.TYPE == 'processes':
+            self.assertEqual(conn.readable, True)
+            self.assertEqual(conn.writable, True)
+            # the peer has closed, so further reads hit EOF
+            self.assertRaises(EOFError, conn.recv)
+            self.assertRaises(EOFError, conn.recv_bytes)
+
+        p.join()
+
+    def test_duplex_false(self):
+        # duplex=False yields a one-way pipe: reader recv-only, writer
+        # send-only
+        reader, writer = self.Pipe(duplex=False)
+        self.assertEqual(writer.send(1), None)
+        self.assertEqual(reader.recv(), 1)
+        if self.TYPE == 'processes':
+            self.assertEqual(reader.readable, True)
+            self.assertEqual(reader.writable, False)
+            self.assertEqual(writer.readable, False)
+            self.assertEqual(writer.writable, True)
+            self.assertRaises(IOError, reader.send, 2)
+            self.assertRaises(IOError, writer.recv)
+            self.assertRaises(IOError, writer.poll)
+
+    def test_spawn_close(self):
+        # We test that a pipe connection can be closed by parent
+        # process immediately after child is spawned.  On Windows this
+        # would have sometimes failed on old versions because
+        # child_conn would be closed before the child got a chance to
+        # duplicate it.
+        conn, child_conn = self.Pipe()
+
+        p = self.Process(target=self._echo, args=(child_conn,))
+        p.start()
+        child_conn.close()    # this might complete before child initializes
+
+        msg = latin('hello')
+        conn.send_bytes(msg)
+        self.assertEqual(conn.recv_bytes(), msg)
+
+        conn.send_bytes(SENTINEL)
+        conn.close()
+        p.join()
+
+    def test_sendbytes(self):
+        # exercise send_bytes(buf, offset, size) argument validation;
+        # only the process flavour implements the slicing arguments
+        if self.TYPE != 'processes':
+            return
+
+        msg = latin('abcdefghijklmnopqrstuvwxyz')
+        a, b = self.Pipe()
+
+        a.send_bytes(msg)
+        self.assertEqual(b.recv_bytes(), msg)
+
+        a.send_bytes(msg, 5)
+        self.assertEqual(b.recv_bytes(), msg[5:])
+
+        a.send_bytes(msg, 7, 8)
+        self.assertEqual(b.recv_bytes(), msg[7:7+8])
+
+        a.send_bytes(msg, 26)
+        self.assertEqual(b.recv_bytes(), latin(''))
+
+        a.send_bytes(msg, 26, 0)
+        self.assertEqual(b.recv_bytes(), latin(''))
+
+        # offset/size combinations that fall outside the buffer must raise
+        self.assertRaises(ValueError, a.send_bytes, msg, 27)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, -1)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
+
+class _TestListenerClient(BaseTestCase):
+    """Listener/Client handshake for every supported connection family."""
+
+    ALLOWED_TYPES = ('processes', 'threads')
+
+    def _test(self, address):
+        # Child-side helper: connect to the listener and send one message.
+        conn = self.connection.Client(address)
+        conn.send('hello')
+        conn.close()
+
+    def test_listener_client(self):
+        for family in self.connection.families:
+            l = self.connection.Listener(family=family)
+            p = self.Process(target=self._test, args=(l.address,))
+            p.set_daemon(True)
+            p.start()
+            conn = l.accept()
+            self.assertEqual(conn.recv(), 'hello')
+            p.join()
+            l.close()
+
+#
+# Test of sending connection and socket objects between processes
+#
+
+class _TestPicklingConnections(BaseTestCase):
+    """Sending Connection and socket objects between processes by pickling."""
+
+    ALLOWED_TYPES = ('processes',)
+
+    def _listener(self, conn, families):
+        # Child 1: for each family, create a listener, report its address
+        # to the parent, accept one connection and send that connection
+        # object itself back over the pipe.
+        for fam in families:
+            l = self.connection.Listener(family=fam)
+            conn.send(l.address)
+            new_conn = l.accept()
+            conn.send(new_conn)
+
+        # same dance with a raw socket instead of a Listener
+        if self.TYPE == 'processes':
+            l = socket.socket()
+            l.bind(('localhost', 0))
+            conn.send(l.getsockname())
+            l.listen(1)
+            new_conn, addr = l.accept()
+            conn.send(new_conn)
+
+        # block until the parent signals shutdown
+        conn.recv()
+
+    def _remote(self, conn):
+        # Child 2: receive (address, msg) pairs until None, connect to
+        # each address and send back the upper-cased message.
+        for (address, msg) in iter(conn.recv, None):
+            client = self.connection.Client(address)
+            client.send(msg.upper())
+            client.close()
+
+        if self.TYPE == 'processes':
+            address, msg = conn.recv()
+            client = socket.socket()
+            client.connect(address)
+            client.sendall(msg.upper())
+            client.close()
+
+        conn.close()
+
+    def test_pickling(self):
+        # connection pickling is optional; skip when unsupported
+        try:
+            multiprocessing.allow_connection_pickling()
+        except ImportError:
+            return
+
+        families = self.connection.families
+
+        lconn, lconn0 = self.Pipe()
+        lp = self.Process(target=self._listener, args=(lconn0, families))
+        lp.start()
+        lconn0.close()
+
+        rconn, rconn0 = self.Pipe()
+        rp = self.Process(target=self._remote, args=(rconn0,))
+        rp.start()
+        rconn0.close()
+
+        # the accepted connection is pickled from the listener child to
+        # us, and must still deliver the remote child's reply
+        for fam in families:
+            msg = ('This connection uses family %s' % fam).encode('ascii')
+            address = lconn.recv()
+            rconn.send((address, msg))
+            new_conn = lconn.recv()
+            self.assertEqual(new_conn.recv(), msg.upper())
+
+        rconn.send(None)
+
+        if self.TYPE == 'processes':
+            msg = latin('This connection uses a normal socket')
+            address = lconn.recv()
+            rconn.send((address, msg))
+            if hasattr(socket, 'fromfd'):
+                new_conn = lconn.recv()
+                self.assertEqual(new_conn.recv(100), msg.upper())
+            else:
+                # XXX On Windows with Py2.6 need to backport fromfd()
+                discard = lconn.recv_bytes()
+
+        lconn.send(None)
+
+        rconn.close()
+        lconn.close()
+
+        lp.join()
+        rp.join()
+
+#
+#
+#
+
+class _TestHeap(BaseTestCase):
+    """Stress multiprocessing.heap and verify its free/occupied bookkeeping."""
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_heap(self):
+        iterations = 5000
+        maxblocks = 50
+        blocks = []
+
+        # create and destroy lots of blocks of different sizes
+        for i in xrange(iterations):
+            size = int(random.lognormvariate(0, 1) * 1000)
+            b = multiprocessing.heap.BufferWrapper(size)
+            blocks.append(b)
+            if len(blocks) > maxblocks:
+                i = random.randrange(maxblocks)
+                del blocks[i]
+
+        # get the heap object
+        heap = multiprocessing.heap.BufferWrapper._heap
+
+        # verify the state of the heap
+        # collect every free and occupied extent as
+        # (arena index, start, stop, length, state)
+        all = []
+        occupied = 0
+        for L in heap._len_to_seq.values():
+            for arena, start, stop in L:
+                all.append((heap._arenas.index(arena), start, stop,
+                            stop-start, 'free'))
+        for arena, start, stop in heap._allocated_blocks:
+            all.append((heap._arenas.index(arena), start, stop,
+                        stop-start, 'occupied'))
+            occupied += (stop-start)
+
+        all.sort()
+
+        # within an arena, consecutive extents must tile it exactly
+        # (each stop equals the next start); gaps or overlaps are bugs
+        for i in range(len(all)-1):
+            (arena, start, stop) = all[i][:3]
+            (narena, nstart, nstop) = all[i+1][:3]
+            self.assertTrue((arena != narena and nstart == 0) or
+                            (stop == nstart))
+
+#
+#
+#
+
+# ctypes may be unavailable on some builds; substitute sentinels so this
+# module still imports and the ctypes-based tests can detect that and skip.
+try:
+    from ctypes import Structure, Value, copy, c_int, c_double
+except ImportError:
+    Structure = object
+    c_int = c_double = None
+
+class _Foo(Structure):
+    # Small ctypes structure used by the shared-ctypes tests below.
+    _fields_ = [
+        ('x', c_int),
+        ('y', c_double)
+        ]
+
+class _TestSharedCTypes(BaseTestCase):
+    """Tests for multiprocessing.sharedctypes Value/Array and copy()."""
+
+    ALLOWED_TYPES = ('processes',)
+
+    def _double(self, x, y, foo, arr, string):
+        # Runs in the child process: double every shared value in place.
+        x.value *= 2
+        y.value *= 2
+        foo.x *= 2
+        foo.y *= 2
+        string.value *= 2
+        for i in range(len(arr)):
+            arr[i] *= 2
+
+    def test_sharedctypes(self, lock=False):
+        # Skip silently when ctypes is unavailable (see sentinel above).
+        if c_int is None:
+            return
+
+        x = Value('i', 7, lock=lock)
+        # NOTE(review): uses ctypes.c_double, so a module-level
+        # "import ctypes" must exist elsewhere in this file -- verify.
+        y = Value(ctypes.c_double, 1.0/3.0, lock=lock)
+        foo = Value(_Foo, 3, 2, lock=lock)
+        arr = Array('d', range(10), lock=lock)
+        string = Array('c', 20, lock=lock)
+        string.value = 'hello'
+
+        # Child doubles everything; parent then observes the shared state.
+        p = self.Process(target=self._double, args=(x, y, foo, arr, string))
+        p.start()
+        p.join()
+
+        self.assertEqual(x.value, 14)
+        self.assertAlmostEqual(y.value, 2.0/3.0)
+        self.assertEqual(foo.x, 6)
+        self.assertAlmostEqual(foo.y, 4.0)
+        for i in range(10):
+            self.assertAlmostEqual(arr[i], i*2)
+        self.assertEqual(string.value, latin('hellohello'))
+
+    def test_synchronize(self):
+        # Same as test_sharedctypes but with lock-protected wrappers.
+        self.test_sharedctypes(lock=True)
+
+    def test_copy(self):
+        if c_int is None:
+            return
+
+        # copy() must produce an independent snapshot of the structure.
+        foo = _Foo(2, 5.0)
+        bar = copy(foo)
+        foo.x = 0
+        foo.y = 0
+        self.assertEqual(bar.x, 2)
+        self.assertAlmostEqual(bar.y, 5.0)
+
+#
+#
+#
+
+class _TestFinalize(BaseTestCase):
+    """Tests for util.Finalize callbacks and their exit ordering."""
+
+    ALLOWED_TYPES = ('processes',)
+
+    def _test_finalize(self, conn):
+        class Foo(object):
+            pass
+
+        a = Foo()
+        util.Finalize(a, conn.send, args=('a',))
+        del a           # triggers callback for a
+
+        b = Foo()
+        close_b = util.Finalize(b, conn.send, args=('b',))
+        close_b()       # triggers callback for b
+        close_b()       # does nothing because callback has already been called
+        del b           # does nothing because callback has already been called
+
+        # c gets no exitpriority and stays alive, so (per the expected
+        # result below) its callback never fires
+        c = Foo()
+        util.Finalize(c, conn.send, args=('c',))
+
+        # finalizers with an exitpriority run at process exit; the expected
+        # result shows higher priorities fire first (d10 before d0x, then e)
+        d10 = Foo()
+        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
+
+        d01 = Foo()
+        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
+        d02 = Foo()
+        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
+        d03 = Foo()
+        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
+
+        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
+
+        # lowest priority runs last and acts as the end-of-stream marker
+        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
+
+        # call multiprocessing's cleanup function then exit process without
+        # garbage collecting locals
+        util._exit_function()
+        conn.close()
+        os._exit(0)
+
+    def test_finalize(self):
+        conn, child_conn = self.Pipe()
+
+        p = self.Process(target=self._test_finalize, args=(child_conn,))
+        p.start()
+        p.join()
+
+        # drain everything the child sent until the 'STOP' sentinel
+        result = [obj for obj in iter(conn.recv, 'STOP')]
+        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
+
+#
+# Test that from ... import * works for each module
+#
+
+class _TestImportStar(BaseTestCase):
+    """Check that every name in each submodule's __all__ really exists,
+    i.e. that `from <mod> import *` would succeed."""
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_import(self):
+        modules = (
+            'multiprocessing', 'multiprocessing.connection',
+            'multiprocessing.heap', 'multiprocessing.managers',
+            'multiprocessing.pool', 'multiprocessing.process',
+            'multiprocessing.reduction', 'multiprocessing.sharedctypes',
+            'multiprocessing.synchronize', 'multiprocessing.util'
+            )
+
+        for name in modules:
+            __import__(name)
+            mod = sys.modules[name]
+
+            # modules without __all__ are skipped via the () default
+            for attr in getattr(mod, '__all__', ()):
+                self.assertTrue(
+                    hasattr(mod, attr),
+                    '%r does not have attribute %r' % (mod, attr)
+                    )
+
+#
+# Quick test that logging works -- does not test logging output
+#
+
+class _TestLogging(BaseTestCase):
+    """Quick test that logging works -- does not test logging output."""
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_enable_logging(self):
+        logger = multiprocessing.get_logger()
+        logger.setLevel(util.SUBWARNING)
+        self.assertTrue(logger is not None)
+        logger.debug('this will not be printed')
+        logger.info('nor will this')
+        # restore the level the rest of the test run expects
+        logger.setLevel(LOG_LEVEL)
+
+    def _test_level(self, conn):
+        # Runs in the child: report the effective level it inherited.
+        logger = multiprocessing.get_logger()
+        conn.send(logger.getEffectiveLevel())
+
+    def test_level(self):
+        LEVEL1 = 32
+        LEVEL2 = 37
+
+        logger = multiprocessing.get_logger()
+        root_logger = logging.getLogger()
+        root_level = root_logger.level
+
+        reader, writer = multiprocessing.Pipe(duplex=False)
+
+        # level set directly on the multiprocessing logger
+        logger.setLevel(LEVEL1)
+        self.Process(target=self._test_level, args=(writer,)).start()
+        self.assertEqual(LEVEL1, reader.recv())
+
+        # with NOTSET, the effective level falls back to the root logger
+        logger.setLevel(logging.NOTSET)
+        root_logger.setLevel(LEVEL2)
+        self.Process(target=self._test_level, args=(writer,)).start()
+        self.assertEqual(LEVEL2, reader.recv())
+
+        # restore both loggers for subsequent tests
+        root_logger.setLevel(root_level)
+        logger.setLevel(level=LOG_LEVEL)
+
+#
+# Functions used to create test cases from the base ones in this module
+#
+
+def get_attributes(Source, names):
+    # Copy the named attributes of Source into a dict.  Plain functions
+    # are wrapped in staticmethod so they remain directly callable after
+    # the dict is merged into a class namespace via locals().update()
+    # (otherwise they would turn into bound methods).
+    d = {}
+    for name in names:
+        obj = getattr(Source, name)
+        if type(obj) == type(get_attributes):
+            obj = staticmethod(obj)
+        d[name] = obj
+    return d
+
+def create_test_cases(Mixin, type):
+    # For every _Test* base class in this module that lists `type` in its
+    # ALLOWED_TYPES, manufacture a concrete TestCase subclass named
+    # 'With<Type><BaseName>' and return them as {name: class}.
+    result = {}
+    glob = globals()
+    Type = type[0].upper() + type[1:]
+
+    for name in glob.keys():
+        if name.startswith('_Test'):
+            base = glob[name]
+            if type in base.ALLOWED_TYPES:
+                newname = 'With' + Type + name[1:]
+                class Temp(base, unittest.TestCase, Mixin):
+                    pass
+                result[newname] = Temp
+                # fix up identity so unittest reports the generated name
+                Temp.__name__ = newname
+                Temp.__module__ = Mixin.__module__
+    return result
+
+#
+# Create test cases
+#
+
+class ProcessesMixin(object):
+    # Mixin exposing the real multiprocessing API for the 'processes' run.
+    TYPE = 'processes'
+    Process = multiprocessing.Process
+    locals().update(get_attributes(multiprocessing, (
+        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+        'Condition', 'Event', 'Value', 'Array', 'RawValue',
+        'RawArray', 'current_process', 'active_children', 'Pipe',
+        'connection', 'JoinableQueue'
+        )))
+
+# generate the With-Processes test cases and publish them at module level
+testcases_processes = create_test_cases(ProcessesMixin, type='processes')
+globals().update(testcases_processes)
+
+
+class ManagerMixin(object):
+    # Mixin exposing SyncManager proxies for the 'manager' run.  The
+    # manager instance is built with object.__new__ so that __init__()
+    # and start() can be deferred until test_main().
+    TYPE = 'manager'
+    Process = multiprocessing.Process
+    manager = object.__new__(multiprocessing.managers.SyncManager)
+    locals().update(get_attributes(manager, (
+        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+       'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
+        'Namespace', 'JoinableQueue'
+        )))
+
+# generate the With-Manager test cases and publish them at module level
+testcases_manager = create_test_cases(ManagerMixin, type='manager')
+globals().update(testcases_manager)
+
+
+class ThreadsMixin(object):
+    # Mixin exposing the thread-based multiprocessing.dummy API.
+    TYPE = 'threads'
+    Process = multiprocessing.dummy.Process
+    locals().update(get_attributes(multiprocessing.dummy, (
+        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+        'Condition', 'Event', 'Value', 'Array', 'current_process',
+        'active_children', 'Pipe', 'connection', 'dict', 'list',
+        'Namespace', 'JoinableQueue'
+        )))
+
+# generate the With-Threads test cases and publish them at module level
+testcases_threads = create_test_cases(ThreadsMixin, type='threads')
+globals().update(testcases_threads)
+
+#
+#
+#
+
+def test_main(run=None):
+    """Set up shared pools/manager, run every generated test case with
+    `run` (defaults to test_support.run_unittest), then tear down."""
+    if run is None:
+        from test.test_support import run_unittest as run
+
+    util.get_temp_dir()     # creates temp directory for use by all processes
+
+    multiprocessing.get_logger().setLevel(LOG_LEVEL)
+
+    # pools and the manager are shared by all cases of the matching type;
+    # the manager's deferred __init__/start happens here (see ManagerMixin)
+    ProcessesMixin.pool = multiprocessing.Pool(4)
+    ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
+    ManagerMixin.manager.__init__()
+    ManagerMixin.manager.start()
+    ManagerMixin.pool = ManagerMixin.manager.Pool(4)
+
+    # sort within each group so the run order is deterministic
+    testcases = (
+        sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
+        sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
+        sorted(testcases_manager.values(), key=lambda tc:tc.__name__)
+        )
+
+    loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
+    suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
+    run(suite)
+
+    ThreadsMixin.pool.terminate()
+    ProcessesMixin.pool.terminate()
+    ManagerMixin.pool.terminate()
+    ManagerMixin.manager.shutdown()
+
+    # drop references so the pools can be reclaimed
+    del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
+
+def main():
+    # Verbose text runner for direct command-line invocation.
+    test_main(unittest.TextTestRunner(verbosity=2).run)
+
+if __name__ == '__main__':
+    main()
index c1ac4c7c615fde827e13e4a9e35e41363472a659..48049c76b2544f3d01a8f7d97b7b6186343f4b55 100644 (file)
-/*\r
- * Extension module used by multiprocessing package\r
- *\r
- * multiprocessing.c\r
- *\r
- * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
- */\r
-\r
-#include "multiprocessing.h"\r
-\r
-PyObject *create_win32_namespace(void);\r
-\r
-PyObject *pickle_dumps, *pickle_loads, *pickle_protocol;\r
-PyObject *ProcessError, *BufferTooShort;\r
-\r
-/*\r
- * Function which raises exceptions based on error codes\r
- */\r
-\r
-PyObject *\r
-mp_SetError(PyObject *Type, int num)\r
-{\r
-       switch (num) {\r
-#ifdef MS_WINDOWS\r
-       case MP_STANDARD_ERROR: \r
-               if (Type == NULL)\r
-                       Type = PyExc_WindowsError;\r
-               PyErr_SetExcFromWindowsErr(Type, 0);\r
-               break;\r
-       case MP_SOCKET_ERROR:\r
-               if (Type == NULL)\r
-                       Type = PyExc_WindowsError;\r
-               PyErr_SetExcFromWindowsErr(Type, WSAGetLastError());\r
-               break;\r
-#else /* !MS_WINDOWS */\r
-       case MP_STANDARD_ERROR:\r
-       case MP_SOCKET_ERROR:\r
-               if (Type == NULL)\r
-                       Type = PyExc_OSError;\r
-               PyErr_SetFromErrno(Type);\r
-               break;\r
-#endif /* !MS_WINDOWS */\r
-       case MP_MEMORY_ERROR:\r
-               PyErr_NoMemory();\r
-               break;\r
-       case MP_END_OF_FILE:\r
-               PyErr_SetNone(PyExc_EOFError);\r
-               break;\r
-       case MP_EARLY_END_OF_FILE:\r
-               PyErr_SetString(PyExc_IOError,\r
-                               "got end of file during message");\r
-               break;\r
-       case MP_BAD_MESSAGE_LENGTH:\r
-               PyErr_SetString(PyExc_IOError, "bad message length");\r
-               break;\r
-       case MP_EXCEPTION_HAS_BEEN_SET:\r
-               break;\r
-       default:\r
-               PyErr_Format(PyExc_RuntimeError,\r
-                            "unkown error number %d", num);\r
-       }\r
-       return NULL;\r
-}\r
-\r
-\r
-/*\r
- * Windows only\r
- */\r
-\r
-#ifdef MS_WINDOWS\r
-\r
-/* On Windows we set an event to signal Ctrl-C; compare with timemodule.c */\r
-\r
-HANDLE sigint_event = NULL;\r
-\r
-static BOOL WINAPI\r
-ProcessingCtrlHandler(DWORD dwCtrlType)\r
-{\r
-       SetEvent(sigint_event);\r
-       return FALSE;\r
-}\r
-\r
-/*\r
- * Unix only\r
- */\r
-\r
-#else /* !MS_WINDOWS */\r
-\r
-#if HAVE_FD_TRANSFER\r
-\r
-/* Functions for transferring file descriptors between processes.\r
-   Reimplements some of the functionality of the fdcred\r
-   module at http://www.mca-ltd.com/resources/fdcred_1.tgz. */\r
-\r
-static PyObject *\r
-multiprocessing_sendfd(PyObject *self, PyObject *args)\r
-{\r
-       int conn, fd, res;\r
-       char dummy_char;\r
-       char buf[CMSG_SPACE(sizeof(int))];\r
-       struct msghdr msg = {0};\r
-       struct iovec dummy_iov;\r
-       struct cmsghdr *cmsg;\r
-\r
-       if (!PyArg_ParseTuple(args, "ii", &conn, &fd))\r
-               return NULL;\r
-\r
-       dummy_iov.iov_base = &dummy_char;\r
-       dummy_iov.iov_len = 1;\r
-       msg.msg_control = buf;\r
-       msg.msg_controllen = sizeof(buf);\r
-       msg.msg_iov = &dummy_iov;\r
-       msg.msg_iovlen = 1;\r
-       cmsg = CMSG_FIRSTHDR(&msg);\r
-       cmsg->cmsg_level = SOL_SOCKET;\r
-       cmsg->cmsg_type = SCM_RIGHTS;\r
-       cmsg->cmsg_len = CMSG_LEN(sizeof(int));\r
-       msg.msg_controllen = cmsg->cmsg_len;\r
-       *(int*)CMSG_DATA(cmsg) = fd;\r
-\r
-       Py_BEGIN_ALLOW_THREADS\r
-       res = sendmsg(conn, &msg, 0);\r
-       Py_END_ALLOW_THREADS\r
-\r
-       if (res < 0)\r
-               return PyErr_SetFromErrno(PyExc_OSError);\r
-       Py_RETURN_NONE;\r
-}\r
-\r
-static PyObject *\r
-multiprocessing_recvfd(PyObject *self, PyObject *args)\r
-{\r
-       int conn, fd, res;\r
-       char dummy_char;\r
-       char buf[CMSG_SPACE(sizeof(int))];\r
-       struct msghdr msg = {0};\r
-       struct iovec dummy_iov;\r
-       struct cmsghdr *cmsg;\r
-\r
-       if (!PyArg_ParseTuple(args, "i", &conn))\r
-               return NULL;\r
-\r
-       dummy_iov.iov_base = &dummy_char;\r
-       dummy_iov.iov_len = 1;\r
-       msg.msg_control = buf;\r
-       msg.msg_controllen = sizeof(buf);\r
-       msg.msg_iov = &dummy_iov;\r
-       msg.msg_iovlen = 1;\r
-       cmsg = CMSG_FIRSTHDR(&msg);\r
-       cmsg->cmsg_level = SOL_SOCKET;\r
-       cmsg->cmsg_type = SCM_RIGHTS;\r
-       cmsg->cmsg_len = CMSG_LEN(sizeof(int));\r
-       msg.msg_controllen = cmsg->cmsg_len;\r
-\r
-       Py_BEGIN_ALLOW_THREADS\r
-       res = recvmsg(conn, &msg, 0);\r
-       Py_END_ALLOW_THREADS\r
-\r
-       if (res < 0)\r
-               return PyErr_SetFromErrno(PyExc_OSError);\r
-\r
-       fd = *(int*)CMSG_DATA(cmsg);\r
-       return Py_BuildValue("i", fd);\r
-}\r
-\r
-#endif /* HAVE_FD_TRANSFER */\r
-\r
-#endif /* !MS_WINDOWS */\r
-\r
-\r
-/*\r
- * All platforms\r
- */\r
-\r
-static PyObject*\r
-multiprocessing_address_of_buffer(PyObject *self, PyObject *obj)\r
-{\r
-       void *buffer;\r
-       Py_ssize_t buffer_len;\r
-\r
-       if (PyObject_AsWriteBuffer(obj, &buffer, &buffer_len) < 0)\r
-               return NULL;\r
-\r
-       return Py_BuildValue("N" F_PY_SSIZE_T, \r
-                            PyLong_FromVoidPtr(buffer), buffer_len);\r
-}\r
-\r
-\r
-/*\r
- * Function table\r
- */\r
-\r
-static PyMethodDef module_methods[] = {\r
-       {"address_of_buffer", multiprocessing_address_of_buffer, METH_O, \r
-        "address_of_buffer(obj) -> int\n" \r
-        "Return address of obj assuming obj supports buffer inteface"},\r
-#if HAVE_FD_TRANSFER\r
-       {"sendfd", multiprocessing_sendfd, METH_VARARGS, \r
-        "sendfd(sockfd, fd) -> None\n"\r
-        "Send file descriptor given by fd over the unix domain socket\n"\r
-        "whose file decriptor is sockfd"},\r
-       {"recvfd", multiprocessing_recvfd, METH_VARARGS,\r
-        "recvfd(sockfd) -> fd\n"\r
-        "Receive a file descriptor over a unix domain socket\n"\r
-        "whose file decriptor is sockfd"},\r
-#endif\r
-       {NULL}\r
-};\r
-\r
-\r
-/*\r
- * Initialize\r
- */\r
-\r
-PyMODINIT_FUNC\r
-init_multiprocessing(void)\r
-{\r
-       PyObject *module, *temp, *value;\r
-\r
-       /* Initialize module */\r
-       module = Py_InitModule("_multiprocessing", module_methods);\r
-       if (!module)\r
-               return;\r
-\r
-       /* Get copy of objects from pickle */\r
-       temp = PyImport_ImportModule(PICKLE_MODULE);\r
-       if (!temp)\r
-               return;\r
-       pickle_dumps = PyObject_GetAttrString(temp, "dumps");\r
-       pickle_loads = PyObject_GetAttrString(temp, "loads");\r
-       pickle_protocol = PyObject_GetAttrString(temp, "HIGHEST_PROTOCOL");\r
-       Py_XDECREF(temp);\r
-\r
-       /* Get copy of BufferTooShort */\r
-       temp = PyImport_ImportModule("multiprocessing");\r
-       if (!temp)\r
-               return;\r
-       BufferTooShort = PyObject_GetAttrString(temp, "BufferTooShort");\r
-       Py_XDECREF(temp);\r
-\r
-       /* Add connection type to module */\r
-       if (PyType_Ready(&ConnectionType) < 0)\r
-               return;\r
-       Py_INCREF(&ConnectionType);     \r
-       PyModule_AddObject(module, "Connection", (PyObject*)&ConnectionType);\r
-\r
-#if defined(MS_WINDOWS) || HAVE_SEM_OPEN\r
-       /* Add SemLock type to module */\r
-       if (PyType_Ready(&SemLockType) < 0)\r
-               return;\r
-       Py_INCREF(&SemLockType);\r
-       PyDict_SetItemString(SemLockType.tp_dict, "SEM_VALUE_MAX", \r
-                            Py_BuildValue("i", SEM_VALUE_MAX));\r
-       PyModule_AddObject(module, "SemLock", (PyObject*)&SemLockType);   \r
-#endif\r
-\r
-#ifdef MS_WINDOWS\r
-       /* Add PipeConnection to module */\r
-       if (PyType_Ready(&PipeConnectionType) < 0)\r
-               return;\r
-       Py_INCREF(&PipeConnectionType);\r
-       PyModule_AddObject(module, "PipeConnection",\r
-                          (PyObject*)&PipeConnectionType);\r
-\r
-       /* Initialize win32 class and add to multiprocessing */\r
-       temp = create_win32_namespace();\r
-       if (!temp)\r
-               return;\r
-       PyModule_AddObject(module, "win32", temp);\r
-\r
-       /* Initialize the event handle used to signal Ctrl-C */\r
-       sigint_event = CreateEvent(NULL, TRUE, FALSE, NULL);\r
-       if (!sigint_event) {\r
-               PyErr_SetFromWindowsErr(0);\r
-               return;\r
-       }\r
-       if (!SetConsoleCtrlHandler(ProcessingCtrlHandler, TRUE)) {\r
-               PyErr_SetFromWindowsErr(0);\r
-               return;\r
-       }\r
-#endif\r
-\r
-       /* Add configuration macros */\r
-       temp = PyDict_New();\r
-       if (!temp)\r
-               return;\r
-#define ADD_FLAG(name)                                           \\r
-       value = Py_BuildValue("i", name);                         \\r
-       if (value == NULL) { Py_DECREF(temp); return; }           \\r
-       if (PyDict_SetItemString(temp, #name, value) < 0) {       \\r
-               Py_DECREF(temp); Py_DECREF(value); return; }      \\r
-       Py_DECREF(value)\r
-       \r
-#ifdef HAVE_SEM_OPEN\r
-       ADD_FLAG(HAVE_SEM_OPEN);\r
-#endif\r
-#ifdef HAVE_SEM_TIMEDWAIT\r
-       ADD_FLAG(HAVE_SEM_TIMEDWAIT);\r
-#endif\r
-#ifdef HAVE_FD_TRANSFER\r
-       ADD_FLAG(HAVE_FD_TRANSFER);\r
-#endif\r
-#ifdef HAVE_BROKEN_SEM_GETVALUE\r
-       ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE);\r
-#endif\r
-#ifdef HAVE_BROKEN_SEM_UNLINK\r
-       ADD_FLAG(HAVE_BROKEN_SEM_UNLINK);\r
-#endif\r
-       if (PyModule_AddObject(module, "flags", temp) < 0)\r
-               return;\r
-}\r
+/*
+ * Extension module used by multiprocessing package
+ *
+ * multiprocessing.c
+ *
+ * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+ */
+
+#include "multiprocessing.h"
+
+PyObject *create_win32_namespace(void);
+
+PyObject *pickle_dumps, *pickle_loads, *pickle_protocol;
+PyObject *ProcessError, *BufferTooShort;
+
+/*
+ * Function which raises exceptions based on error codes
+ */
+
+/* Raise a Python exception corresponding to the MP_* error code `num`.
+ * `Type` may override the default exception class (NULL selects the
+ * platform default).  Always returns NULL so callers can write
+ * `return mp_SetError(...)`. */
+PyObject *
+mp_SetError(PyObject *Type, int num)
+{
+       switch (num) {
+#ifdef MS_WINDOWS
+       case MP_STANDARD_ERROR: 
+               if (Type == NULL)
+                       Type = PyExc_WindowsError;
+               PyErr_SetExcFromWindowsErr(Type, 0);
+               break;
+       case MP_SOCKET_ERROR:
+               if (Type == NULL)
+                       Type = PyExc_WindowsError;
+               PyErr_SetExcFromWindowsErr(Type, WSAGetLastError());
+               break;
+#else /* !MS_WINDOWS */
+       case MP_STANDARD_ERROR:
+       case MP_SOCKET_ERROR:
+               if (Type == NULL)
+                       Type = PyExc_OSError;
+               PyErr_SetFromErrno(Type);
+               break;
+#endif /* !MS_WINDOWS */
+       case MP_MEMORY_ERROR:
+               PyErr_NoMemory();
+               break;
+       case MP_END_OF_FILE:
+               PyErr_SetNone(PyExc_EOFError);
+               break;
+       case MP_EARLY_END_OF_FILE:
+               PyErr_SetString(PyExc_IOError,
+                               "got end of file during message");
+               break;
+       case MP_BAD_MESSAGE_LENGTH:
+               PyErr_SetString(PyExc_IOError, "bad message length");
+               break;
+       case MP_EXCEPTION_HAS_BEEN_SET:
+               /* an exception is already pending; nothing to set */
+               break;
+       default:
+               /* NOTE(review): "unkown" is a typo in the runtime message
+                * text (left unchanged here) */
+               PyErr_Format(PyExc_RuntimeError,
+                            "unkown error number %d", num);
+       }
+       return NULL;
+}
+
+
+/*
+ * Windows only
+ */
+
+#ifdef MS_WINDOWS
+
+/* On Windows we set an event to signal Ctrl-C; compare with timemodule.c */
+
+HANDLE sigint_event = NULL;
+
+/* Console control handler: signal the shared Ctrl-C event, then return
+ * FALSE so the rest of the handler chain still runs. */
+static BOOL WINAPI
+ProcessingCtrlHandler(DWORD dwCtrlType)
+{
+       SetEvent(sigint_event);
+       return FALSE;
+}
+
+/*
+ * Unix only
+ */
+
+#else /* !MS_WINDOWS */
+
+#if HAVE_FD_TRANSFER
+
+/* Functions for transferring file descriptors between processes.
+   Reimplements some of the functionality of the fdcred
+   module at http://www.mca-ltd.com/resources/fdcred_1.tgz. */
+
+/* sendfd(conn, fd): send file descriptor `fd` over the Unix-domain
+ * socket `conn` as an SCM_RIGHTS control message.  One dummy data byte
+ * accompanies it because sendmsg() needs a non-empty iovec. */
+static PyObject *
+multiprocessing_sendfd(PyObject *self, PyObject *args)
+{
+       int conn, fd, res;
+       char dummy_char;
+       char buf[CMSG_SPACE(sizeof(int))];
+       struct msghdr msg = {0};
+       struct iovec dummy_iov;
+       struct cmsghdr *cmsg;
+
+       if (!PyArg_ParseTuple(args, "ii", &conn, &fd))
+               return NULL;
+
+       dummy_iov.iov_base = &dummy_char;
+       dummy_iov.iov_len = 1;
+       msg.msg_control = buf;
+       msg.msg_controllen = sizeof(buf);
+       msg.msg_iov = &dummy_iov;
+       msg.msg_iovlen = 1;
+       cmsg = CMSG_FIRSTHDR(&msg);
+       cmsg->cmsg_level = SOL_SOCKET;
+       cmsg->cmsg_type = SCM_RIGHTS;
+       cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+       msg.msg_controllen = cmsg->cmsg_len;
+       *(int*)CMSG_DATA(cmsg) = fd;
+
+       /* release the GIL around the blocking syscall */
+       Py_BEGIN_ALLOW_THREADS
+       res = sendmsg(conn, &msg, 0);
+       Py_END_ALLOW_THREADS
+
+       if (res < 0)
+               return PyErr_SetFromErrno(PyExc_OSError);
+       Py_RETURN_NONE;
+}
+
+/* recvfd(conn) -> fd: receive a file descriptor sent with sendfd() over
+ * the Unix-domain socket `conn`.
+ * NOTE(review): only res < 0 is treated as failure; the descriptor is
+ * read from CMSG_DATA(cmsg) assuming recvmsg() filled the control
+ * buffer in place -- a short/controls-truncated receive is not checked. */
+static PyObject *
+multiprocessing_recvfd(PyObject *self, PyObject *args)
+{
+       int conn, fd, res;
+       char dummy_char;
+       char buf[CMSG_SPACE(sizeof(int))];
+       struct msghdr msg = {0};
+       struct iovec dummy_iov;
+       struct cmsghdr *cmsg;
+
+       if (!PyArg_ParseTuple(args, "i", &conn))
+               return NULL;
+
+       dummy_iov.iov_base = &dummy_char;
+       dummy_iov.iov_len = 1;
+       msg.msg_control = buf;
+       msg.msg_controllen = sizeof(buf);
+       msg.msg_iov = &dummy_iov;
+       msg.msg_iovlen = 1;
+       cmsg = CMSG_FIRSTHDR(&msg);
+       cmsg->cmsg_level = SOL_SOCKET;
+       cmsg->cmsg_type = SCM_RIGHTS;
+       cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+       msg.msg_controllen = cmsg->cmsg_len;
+
+       /* release the GIL around the blocking syscall */
+       Py_BEGIN_ALLOW_THREADS
+       res = recvmsg(conn, &msg, 0);
+       Py_END_ALLOW_THREADS
+
+       if (res < 0)
+               return PyErr_SetFromErrno(PyExc_OSError);
+
+       fd = *(int*)CMSG_DATA(cmsg);
+       return Py_BuildValue("i", fd);
+}
+
+#endif /* HAVE_FD_TRANSFER */
+
+#endif /* !MS_WINDOWS */
+
+
+/*
+ * All platforms
+ */
+
+/* address_of_buffer(obj) -> (address, length) of obj's writable buffer. */
+static PyObject*
+multiprocessing_address_of_buffer(PyObject *self, PyObject *obj)
+{
+       void *buffer;
+       Py_ssize_t buffer_len;
+
+       if (PyObject_AsWriteBuffer(obj, &buffer, &buffer_len) < 0)
+               return NULL;
+
+       /* "N" steals the reference returned by PyLong_FromVoidPtr */
+       return Py_BuildValue("N" F_PY_SSIZE_T, 
+                            PyLong_FromVoidPtr(buffer), buffer_len);
+}
+
+
+/*
+ * Function table
+ */
+
+/* Method table for the _multiprocessing module; sendfd/recvfd are only
+ * built where HAVE_FD_TRANSFER is set.
+ * NOTE(review): the docstrings contain the typos "inteface" and
+ * "decriptor" -- runtime strings, noted but left unchanged. */
+static PyMethodDef module_methods[] = {
+       {"address_of_buffer", multiprocessing_address_of_buffer, METH_O, 
+        "address_of_buffer(obj) -> int\n" 
+        "Return address of obj assuming obj supports buffer inteface"},
+#if HAVE_FD_TRANSFER
+       {"sendfd", multiprocessing_sendfd, METH_VARARGS, 
+        "sendfd(sockfd, fd) -> None\n"
+        "Send file descriptor given by fd over the unix domain socket\n"
+        "whose file decriptor is sockfd"},
+       {"recvfd", multiprocessing_recvfd, METH_VARARGS,
+        "recvfd(sockfd) -> fd\n"
+        "Receive a file descriptor over a unix domain socket\n"
+        "whose file decriptor is sockfd"},
+#endif
+       {NULL}
+};
+
+
+/*
+ * Initialize
+ */
+
+/* Module initialization for _multiprocessing: caches the pickle helpers
+ * and BufferTooShort, registers the Connection / SemLock /
+ * PipeConnection types, installs the Windows Ctrl-C handler, and
+ * publishes the build-configuration `flags` dict. */
+PyMODINIT_FUNC
+init_multiprocessing(void)
+{
+       PyObject *module, *temp, *value;
+
+       /* Initialize module */
+       module = Py_InitModule("_multiprocessing", module_methods);
+       if (!module)
+               return;
+
+       /* Get copy of objects from pickle */
+       temp = PyImport_ImportModule(PICKLE_MODULE);
+       if (!temp)
+               return;
+       /* NOTE(review): the three lookups below are not NULL-checked;
+        * a failure would leave an exception pending */
+       pickle_dumps = PyObject_GetAttrString(temp, "dumps");
+       pickle_loads = PyObject_GetAttrString(temp, "loads");
+       pickle_protocol = PyObject_GetAttrString(temp, "HIGHEST_PROTOCOL");
+       Py_XDECREF(temp);
+
+       /* Get copy of BufferTooShort */
+       temp = PyImport_ImportModule("multiprocessing");
+       if (!temp)
+               return;
+       BufferTooShort = PyObject_GetAttrString(temp, "BufferTooShort");
+       Py_XDECREF(temp);
+
+       /* Add connection type to module */
+       if (PyType_Ready(&ConnectionType) < 0)
+               return;
+       Py_INCREF(&ConnectionType);     
+       PyModule_AddObject(module, "Connection", (PyObject*)&ConnectionType);
+
+#if defined(MS_WINDOWS) || HAVE_SEM_OPEN
+       /* Add SemLock type to module */
+       if (PyType_Ready(&SemLockType) < 0)
+               return;
+       Py_INCREF(&SemLockType);
+       PyDict_SetItemString(SemLockType.tp_dict, "SEM_VALUE_MAX", 
+                            Py_BuildValue("i", SEM_VALUE_MAX));
+       PyModule_AddObject(module, "SemLock", (PyObject*)&SemLockType);   
+#endif
+
+#ifdef MS_WINDOWS
+       /* Add PipeConnection to module */
+       if (PyType_Ready(&PipeConnectionType) < 0)
+               return;
+       Py_INCREF(&PipeConnectionType);
+       PyModule_AddObject(module, "PipeConnection",
+                          (PyObject*)&PipeConnectionType);
+
+       /* Initialize win32 class and add to multiprocessing */
+       temp = create_win32_namespace();
+       if (!temp)
+               return;
+       PyModule_AddObject(module, "win32", temp);
+
+       /* Initialize the event handle used to signal Ctrl-C */
+       sigint_event = CreateEvent(NULL, TRUE, FALSE, NULL);
+       if (!sigint_event) {
+               PyErr_SetFromWindowsErr(0);
+               return;
+       }
+       if (!SetConsoleCtrlHandler(ProcessingCtrlHandler, TRUE)) {
+               PyErr_SetFromWindowsErr(0);
+               return;
+       }
+#endif
+
+       /* Add configuration macros */
+       temp = PyDict_New();
+       if (!temp)
+               return;
+
+       /* ADD_FLAG(name): store the integer value of macro `name` under
+        * the key "name" in `temp`, cleaning up on failure */
+#define ADD_FLAG(name)                                           \
+       value = Py_BuildValue("i", name);                         \
+       if (value == NULL) { Py_DECREF(temp); return; }           \
+       if (PyDict_SetItemString(temp, #name, value) < 0) {       \
+               Py_DECREF(temp); Py_DECREF(value); return; }      \
+       Py_DECREF(value)
+       
+#ifdef HAVE_SEM_OPEN
+       ADD_FLAG(HAVE_SEM_OPEN);
+#endif
+#ifdef HAVE_SEM_TIMEDWAIT
+       ADD_FLAG(HAVE_SEM_TIMEDWAIT);
+#endif
+#ifdef HAVE_FD_TRANSFER
+       ADD_FLAG(HAVE_FD_TRANSFER);
+#endif
+#ifdef HAVE_BROKEN_SEM_GETVALUE
+       ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE);
+#endif
+#ifdef HAVE_BROKEN_SEM_UNLINK
+       ADD_FLAG(HAVE_BROKEN_SEM_UNLINK);
+#endif
+       if (PyModule_AddObject(module, "flags", temp) < 0)
+               return;
+}
index 40f2c08f8e2bd840f122f9ee9fc3388d92aa86d4..57eb7b4b98347f535fbf7402d8faa73cc912e2ca 100644 (file)
-#ifndef MULTIPROCESSING_H\r
-#define MULTIPROCESSING_H\r
-\r
-#define PY_SSIZE_T_CLEAN\r
-\r
-#include "Python.h"\r
-#include "structmember.h"\r
-#include "pythread.h"\r
-\r
-/*\r
- * Platform includes and definitions\r
- */\r
-\r
-#ifdef MS_WINDOWS\r
-#  define WIN32_LEAN_AND_MEAN\r
-#  include <windows.h>\r
-#  include <winsock2.h>\r
-#  include <process.h>              /* getpid() */\r
-#  define SEM_HANDLE HANDLE\r
-#  define SEM_VALUE_MAX LONG_MAX\r
-#else\r
-#  include <fcntl.h>                 /* O_CREAT and O_EXCL */\r
-#  include <sys/socket.h>\r
-#  include <arpa/inet.h>             /* htonl() and ntohl() */\r
-#  if HAVE_SEM_OPEN\r
-#    include <semaphore.h>\r
-     typedef sem_t *SEM_HANDLE;\r
-#  endif\r
-#  define HANDLE int\r
-#  define SOCKET int\r
-#  define BOOL int\r
-#  define UINT32 uint32_t\r
-#  define INT32 int32_t\r
-#  define TRUE 1\r
-#  define FALSE 0\r
-#  define INVALID_HANDLE_VALUE (-1)\r
-#endif\r
-\r
-/*\r
- * Make sure Py_ssize_t available\r
- */\r
-\r
-#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)\r
-   typedef int Py_ssize_t;\r
-#  define PY_SSIZE_T_MAX INT_MAX\r
-#  define PY_SSIZE_T_MIN INT_MIN\r
-#  define F_PY_SSIZE_T "i"\r
-#  define PY_FORMAT_SIZE_T ""\r
-#  define PyInt_FromSsize_t(n) PyInt_FromLong((long)n)\r
-#else\r
-#  define F_PY_SSIZE_T "n"\r
-#endif\r
-\r
-/*\r
- * Format codes\r
- */\r
-\r
-#if SIZEOF_VOID_P == SIZEOF_LONG\r
-#  define F_POINTER "k"\r
-#  define T_POINTER T_ULONG\r
-#elif defined(HAVE_LONG_LONG) && (SIZEOF_VOID_P == SIZEOF_LONG_LONG)\r
-#  define F_POINTER "K"\r
-#  define T_POINTER T_ULONGLONG\r
-#else\r
-#  error "can't find format code for unsigned integer of same size as void*"\r
-#endif\r
-\r
-#ifdef MS_WINDOWS\r
-#  define F_HANDLE F_POINTER\r
-#  define T_HANDLE T_POINTER\r
-#  define F_SEM_HANDLE F_HANDLE\r
-#  define T_SEM_HANDLE T_HANDLE\r
-#  define F_DWORD "k"\r
-#  define T_DWORD T_ULONG\r
-#else\r
-#  define F_HANDLE "i"\r
-#  define T_HANDLE T_INT\r
-#  define F_SEM_HANDLE F_POINTER\r
-#  define T_SEM_HANDLE T_POINTER\r
-#endif\r
-\r
-#if PY_VERSION_HEX >= 0x03000000\r
-#  define F_RBUFFER "y"\r
-#else\r
-#  define F_RBUFFER "s"\r
-#endif\r
-\r
-/*\r
- * Error codes which can be returned by functions called without GIL\r
- */\r
-\r
-#define MP_SUCCESS (0)\r
-#define MP_STANDARD_ERROR (-1)\r
-#define MP_MEMORY_ERROR (-1001)\r
-#define MP_END_OF_FILE (-1002)\r
-#define MP_EARLY_END_OF_FILE (-1003)\r
-#define MP_BAD_MESSAGE_LENGTH (-1004)\r
-#define MP_SOCKET_ERROR (-1005)\r
-#define MP_EXCEPTION_HAS_BEEN_SET (-1006)\r
-\r
-PyObject *mp_SetError(PyObject *Type, int num);\r
-\r
-/*\r
- * Externs - not all will really exist on all platforms\r
- */\r
-\r
-extern PyObject *pickle_dumps;\r
-extern PyObject *pickle_loads;\r
-extern PyObject *pickle_protocol;\r
-extern PyObject *BufferTooShort;\r
-extern PyTypeObject SemLockType;\r
-extern PyTypeObject ConnectionType;\r
-extern PyTypeObject PipeConnectionType;\r
-extern HANDLE sigint_event;\r
-\r
-/*\r
- * Py3k compatibility\r
- */\r
-\r
-#if PY_VERSION_HEX >= 0x03000000\r
-#  define PICKLE_MODULE "pickle"\r
-#  define FROM_FORMAT PyUnicode_FromFormat\r
-#  define PyInt_FromLong PyLong_FromLong\r
-#  define PyInt_FromSsize_t PyLong_FromSsize_t\r
-#else\r
-#  define PICKLE_MODULE "cPickle"\r
-#  define FROM_FORMAT PyString_FromFormat\r
-#endif\r
-\r
-#ifndef PyVarObject_HEAD_INIT\r
-#  define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,\r
-#endif\r
-\r
-#ifndef Py_TPFLAGS_HAVE_WEAKREFS\r
-#  define Py_TPFLAGS_HAVE_WEAKREFS 0\r
-#endif\r
-\r
-/*\r
- * Connection definition\r
- */\r
-\r
-#define CONNECTION_BUFFER_SIZE 1024\r
-\r
-typedef struct {\r
-       PyObject_HEAD\r
-       HANDLE handle;\r
-       int flags;\r
-       PyObject *weakreflist;\r
-       char buffer[CONNECTION_BUFFER_SIZE];\r
-} ConnectionObject;\r
-\r
-/*\r
- * Miscellaneous\r
- */\r
-\r
-#define MAX_MESSAGE_LENGTH 0x7fffffff\r
-\r
-#ifndef MIN\r
-#  define MIN(x, y) ((x) < (y) ? x : y)\r
-#  define MAX(x, y) ((x) > (y) ? x : y)\r
-#endif\r
-\r
-#endif /* MULTIPROCESSING_H */\r
+#ifndef MULTIPROCESSING_H
+#define MULTIPROCESSING_H
+
+#define PY_SSIZE_T_CLEAN
+
+#include "Python.h"
+#include "structmember.h"
+#include "pythread.h"
+
+/*
+ * Platform includes and definitions
+ */
+
+#ifdef MS_WINDOWS
+#  define WIN32_LEAN_AND_MEAN
+#  include <windows.h>
+#  include <winsock2.h>
+#  include <process.h>              /* getpid() */
+#  define SEM_HANDLE HANDLE
+#  define SEM_VALUE_MAX LONG_MAX
+#else
+#  include <fcntl.h>                 /* O_CREAT and O_EXCL */
+#  include <sys/socket.h>
+#  include <arpa/inet.h>             /* htonl() and ntohl() */
+#  if HAVE_SEM_OPEN
+#    include <semaphore.h>
+     typedef sem_t *SEM_HANDLE;
+#  endif
+#  define HANDLE int
+#  define SOCKET int
+#  define BOOL int
+#  define UINT32 uint32_t
+#  define INT32 int32_t
+#  define TRUE 1
+#  define FALSE 0
+#  define INVALID_HANDLE_VALUE (-1)
+#endif
+
+/*
+ * Make sure Py_ssize_t available
+ */
+
+#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
+   typedef int Py_ssize_t;
+#  define PY_SSIZE_T_MAX INT_MAX
+#  define PY_SSIZE_T_MIN INT_MIN
+#  define F_PY_SSIZE_T "i"
+#  define PY_FORMAT_SIZE_T ""
+#  define PyInt_FromSsize_t(n) PyInt_FromLong((long)n)
+#else
+#  define F_PY_SSIZE_T "n"
+#endif
+
+/*
+ * Format codes
+ */
+
+#if SIZEOF_VOID_P == SIZEOF_LONG
+#  define F_POINTER "k"
+#  define T_POINTER T_ULONG
+#elif defined(HAVE_LONG_LONG) && (SIZEOF_VOID_P == SIZEOF_LONG_LONG)
+#  define F_POINTER "K"
+#  define T_POINTER T_ULONGLONG
+#else
+#  error "can't find format code for unsigned integer of same size as void*"
+#endif
+
+#ifdef MS_WINDOWS
+#  define F_HANDLE F_POINTER
+#  define T_HANDLE T_POINTER
+#  define F_SEM_HANDLE F_HANDLE
+#  define T_SEM_HANDLE T_HANDLE
+#  define F_DWORD "k"
+#  define T_DWORD T_ULONG
+#else
+#  define F_HANDLE "i"
+#  define T_HANDLE T_INT
+#  define F_SEM_HANDLE F_POINTER
+#  define T_SEM_HANDLE T_POINTER
+#endif
+
+#if PY_VERSION_HEX >= 0x03000000
+#  define F_RBUFFER "y"
+#else
+#  define F_RBUFFER "s"
+#endif
+
+/*
+ * Error codes which can be returned by functions called without GIL
+ */
+
+#define MP_SUCCESS (0)
+#define MP_STANDARD_ERROR (-1)
+#define MP_MEMORY_ERROR (-1001)
+#define MP_END_OF_FILE (-1002)
+#define MP_EARLY_END_OF_FILE (-1003)
+#define MP_BAD_MESSAGE_LENGTH (-1004)
+#define MP_SOCKET_ERROR (-1005)
+#define MP_EXCEPTION_HAS_BEEN_SET (-1006)
+
+PyObject *mp_SetError(PyObject *Type, int num);
+
+/*
+ * Externs - not all will really exist on all platforms
+ */
+
+extern PyObject *pickle_dumps;
+extern PyObject *pickle_loads;
+extern PyObject *pickle_protocol;
+extern PyObject *BufferTooShort;
+extern PyTypeObject SemLockType;
+extern PyTypeObject ConnectionType;
+extern PyTypeObject PipeConnectionType;
+extern HANDLE sigint_event;
+
+/*
+ * Py3k compatibility
+ */
+
+#if PY_VERSION_HEX >= 0x03000000
+#  define PICKLE_MODULE "pickle"
+#  define FROM_FORMAT PyUnicode_FromFormat
+#  define PyInt_FromLong PyLong_FromLong
+#  define PyInt_FromSsize_t PyLong_FromSsize_t
+#else
+#  define PICKLE_MODULE "cPickle"
+#  define FROM_FORMAT PyString_FromFormat
+#endif
+
+#ifndef PyVarObject_HEAD_INIT
+#  define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
+#endif
+
+#ifndef Py_TPFLAGS_HAVE_WEAKREFS
+#  define Py_TPFLAGS_HAVE_WEAKREFS 0
+#endif
+
+/*
+ * Connection definition
+ */
+
+#define CONNECTION_BUFFER_SIZE 1024
+
+typedef struct {
+       PyObject_HEAD
+       HANDLE handle;
+       int flags;
+       PyObject *weakreflist;
+       char buffer[CONNECTION_BUFFER_SIZE];
+} ConnectionObject;
+
+/*
+ * Miscellaneous
+ */
+
+#define MAX_MESSAGE_LENGTH 0x7fffffff
+
+#ifndef MIN
+#  define MIN(x, y) ((x) < (y) ? x : y)
+#  define MAX(x, y) ((x) > (y) ? x : y)
+#endif
+
+#endif /* MULTIPROCESSING_H */
index a96338f996b2220ce36e72385cae066df902e452..1592cca2f24d7d53c7c0db0cdefb4d15b678a898 100644 (file)
-/*\r
- * A type which wraps a pipe handle in message oriented mode\r
- *\r
- * pipe_connection.c\r
- *\r
- * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
- */\r
-\r
-#include "multiprocessing.h"\r
-\r
-#define CLOSE(h) CloseHandle(h)\r
-\r
-/*\r
- * Send string to the pipe; assumes in message oriented mode\r
- */\r
-\r
-static Py_ssize_t\r
-conn_send_string(ConnectionObject *conn, char *string, size_t length)\r
-{\r
-       DWORD amount_written;\r
-\r
-       return WriteFile(conn->handle, string, length, &amount_written, NULL)\r
-               ? MP_SUCCESS : MP_STANDARD_ERROR;\r
-}\r
-\r
-/*\r
- * Attempts to read into buffer, or if buffer too small into *newbuffer.\r
- *\r
- * Returns number of bytes read.  Assumes in message oriented mode.\r
- */\r
-\r
-static Py_ssize_t\r
-conn_recv_string(ConnectionObject *conn, char *buffer, \r
-                size_t buflength, char **newbuffer, size_t maxlength)\r
-{\r
-       DWORD left, length, full_length, err;\r
-\r
-       *newbuffer = NULL;\r
-\r
-       if (ReadFile(conn->handle, buffer, MIN(buflength, maxlength), \r
-                    &length, NULL))\r
-               return length;\r
-\r
-       err = GetLastError();\r
-       if (err != ERROR_MORE_DATA) {\r
-               if (err == ERROR_BROKEN_PIPE)\r
-                       return MP_END_OF_FILE;\r
-               return MP_STANDARD_ERROR;\r
-       }\r
-\r
-       if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, NULL, &left))\r
-               return MP_STANDARD_ERROR;\r
-\r
-       full_length = length + left;\r
-       if (full_length > maxlength)\r
-               return MP_BAD_MESSAGE_LENGTH;\r
-\r
-       *newbuffer = PyMem_Malloc(full_length);\r
-       if (*newbuffer == NULL)\r
-               return MP_MEMORY_ERROR;\r
-\r
-       memcpy(*newbuffer, buffer, length);\r
-\r
-       if (ReadFile(conn->handle, *newbuffer+length, left, &length, NULL)) {\r
-               assert(length == left);\r
-               return full_length;\r
-       } else {\r
-               PyMem_Free(*newbuffer);\r
-               return MP_STANDARD_ERROR;\r
-       }\r
-}\r
-\r
-/*\r
- * Check whether any data is available for reading\r
- */\r
-\r
-#define conn_poll(conn, timeout) conn_poll_save(conn, timeout, _save)\r
-\r
-static int\r
-conn_poll_save(ConnectionObject *conn, double timeout, PyThreadState *_save)\r
-{\r
-       DWORD bytes, deadline, delay;\r
-       int difference, res;\r
-       BOOL block = FALSE;\r
-\r
-       if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))\r
-               return MP_STANDARD_ERROR;\r
-\r
-       if (timeout == 0.0)\r
-               return bytes > 0;\r
-\r
-       if (timeout < 0.0)\r
-               block = TRUE;\r
-       else\r
-               /* XXX does not check for overflow */\r
-               deadline = GetTickCount() + (DWORD)(1000 * timeout + 0.5);\r
-\r
-       Sleep(0);\r
-\r
-       for (delay = 1 ; ; delay += 1) {\r
-               if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))\r
-                       return MP_STANDARD_ERROR;\r
-               else if (bytes > 0)\r
-                       return TRUE;\r
-\r
-               if (!block) {\r
-                       difference = deadline - GetTickCount();\r
-                       if (difference < 0)\r
-                               return FALSE;\r
-                       if ((int)delay > difference)\r
-                               delay = difference;\r
-               }\r
-\r
-               if (delay > 20)\r
-                       delay = 20;\r
-\r
-               Sleep(delay);\r
-\r
-               /* check for signals */\r
-               Py_BLOCK_THREADS \r
-               res = PyErr_CheckSignals();\r
-               Py_UNBLOCK_THREADS\r
-\r
-               if (res)\r
-                       return MP_EXCEPTION_HAS_BEEN_SET;\r
-       }\r
-}\r
-\r
-/*\r
- * "connection.h" defines the PipeConnection type using the definitions above\r
- */\r
-\r
-#define CONNECTION_NAME "PipeConnection"\r
-#define CONNECTION_TYPE PipeConnectionType\r
-\r
-#include "connection.h"\r
+/*
+ * A type which wraps a pipe handle in message oriented mode
+ *
+ * pipe_connection.c
+ *
+ * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+ */
+
+#include "multiprocessing.h"
+
+#define CLOSE(h) CloseHandle(h)
+
+/*
+ * Send string to the pipe; assumes in message oriented mode
+ */
+
+static Py_ssize_t
+conn_send_string(ConnectionObject *conn, char *string, size_t length)
+{
+       DWORD amount_written;
+
+       return WriteFile(conn->handle, string, length, &amount_written, NULL)
+               ? MP_SUCCESS : MP_STANDARD_ERROR;
+}
+
+/*
+ * Attempts to read into buffer, or if buffer too small into *newbuffer.
+ *
+ * Returns number of bytes read.  Assumes in message oriented mode.
+ */
+
+static Py_ssize_t
+conn_recv_string(ConnectionObject *conn, char *buffer, 
+                size_t buflength, char **newbuffer, size_t maxlength)
+{
+       DWORD left, length, full_length, err;
+
+       *newbuffer = NULL;
+
+       if (ReadFile(conn->handle, buffer, MIN(buflength, maxlength), 
+                    &length, NULL))
+               return length;
+
+       err = GetLastError();
+       if (err != ERROR_MORE_DATA) {
+               if (err == ERROR_BROKEN_PIPE)
+                       return MP_END_OF_FILE;
+               return MP_STANDARD_ERROR;
+       }
+
+       if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, NULL, &left))
+               return MP_STANDARD_ERROR;
+
+       full_length = length + left;
+       if (full_length > maxlength)
+               return MP_BAD_MESSAGE_LENGTH;
+
+       *newbuffer = PyMem_Malloc(full_length);
+       if (*newbuffer == NULL)
+               return MP_MEMORY_ERROR;
+
+       memcpy(*newbuffer, buffer, length);
+
+       if (ReadFile(conn->handle, *newbuffer+length, left, &length, NULL)) {
+               assert(length == left);
+               return full_length;
+       } else {
+               PyMem_Free(*newbuffer);
+               return MP_STANDARD_ERROR;
+       }
+}
+
+/*
+ * Check whether any data is available for reading
+ */
+
+#define conn_poll(conn, timeout) conn_poll_save(conn, timeout, _save)
+
+static int
+conn_poll_save(ConnectionObject *conn, double timeout, PyThreadState *_save)
+{
+       DWORD bytes, deadline, delay;
+       int difference, res;
+       BOOL block = FALSE;
+
+       if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))
+               return MP_STANDARD_ERROR;
+
+       if (timeout == 0.0)
+               return bytes > 0;
+
+       if (timeout < 0.0)
+               block = TRUE;
+       else
+               /* XXX does not check for overflow */
+               deadline = GetTickCount() + (DWORD)(1000 * timeout + 0.5);
+
+       Sleep(0);
+
+       for (delay = 1 ; ; delay += 1) {
+               if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))
+                       return MP_STANDARD_ERROR;
+               else if (bytes > 0)
+                       return TRUE;
+
+               if (!block) {
+                       difference = deadline - GetTickCount();
+                       if (difference < 0)
+                               return FALSE;
+                       if ((int)delay > difference)
+                               delay = difference;
+               }
+
+               if (delay > 20)
+                       delay = 20;
+
+               Sleep(delay);
+
+               /* check for signals */
+               Py_BLOCK_THREADS 
+               res = PyErr_CheckSignals();
+               Py_UNBLOCK_THREADS
+
+               if (res)
+                       return MP_EXCEPTION_HAS_BEEN_SET;
+       }
+}
+
+/*
+ * "connection.h" defines the PipeConnection type using the definitions above
+ */
+
+#define CONNECTION_NAME "PipeConnection"
+#define CONNECTION_TYPE PipeConnectionType
+
+#include "connection.h"
index 2bb113490a5d43a73c224d6559c5a260dc4c0dc0..513fc02b1a5d1cfa8a77c81fe4e0bce1b4e92214 100644 (file)
-/*\r
- * Win32 functions used by multiprocessing package\r
- *\r
- * win32_functions.c\r
- *\r
- * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt\r
- */\r
-\r
-#include "multiprocessing.h"\r
-\r
-\r
-#define WIN32_FUNCTION(func) \\r
-    {#func, (PyCFunction)win32_ ## func, METH_VARARGS | METH_STATIC, ""}\r
-\r
-#define WIN32_CONSTANT(fmt, con) \\r
-    PyDict_SetItemString(Win32Type.tp_dict, #con, Py_BuildValue(fmt, con))\r
-\r
-\r
-static PyObject *\r
-win32_CloseHandle(PyObject *self, PyObject *args)\r
-{\r
-       HANDLE hObject;\r
-       BOOL success;\r
-\r
-       if (!PyArg_ParseTuple(args, F_HANDLE, &hObject))\r
-               return NULL;\r
-\r
-       Py_BEGIN_ALLOW_THREADS\r
-       success = CloseHandle(hObject); \r
-       Py_END_ALLOW_THREADS\r
-\r
-       if (!success)\r
-               return PyErr_SetFromWindowsErr(0);\r
-\r
-       Py_RETURN_NONE;\r
-}\r
-\r
-static PyObject *\r
-win32_ConnectNamedPipe(PyObject *self, PyObject *args)\r
-{\r
-       HANDLE hNamedPipe;\r
-       LPOVERLAPPED lpOverlapped;\r
-       BOOL success;\r
-\r
-       if (!PyArg_ParseTuple(args, F_HANDLE F_POINTER, \r
-                             &hNamedPipe, &lpOverlapped))\r
-               return NULL;\r
-\r
-       Py_BEGIN_ALLOW_THREADS\r
-       success = ConnectNamedPipe(hNamedPipe, lpOverlapped);\r
-       Py_END_ALLOW_THREADS\r
-\r
-       if (!success)\r
-               return PyErr_SetFromWindowsErr(0);\r
-\r
-       Py_RETURN_NONE;\r
-}\r
-\r
-static PyObject *\r
-win32_CreateFile(PyObject *self, PyObject *args)\r
-{\r
-       LPCTSTR lpFileName;\r
-       DWORD dwDesiredAccess;\r
-       DWORD dwShareMode;\r
-       LPSECURITY_ATTRIBUTES lpSecurityAttributes;\r
-       DWORD dwCreationDisposition;\r
-       DWORD dwFlagsAndAttributes;\r
-       HANDLE hTemplateFile;\r
-       HANDLE handle;\r
-\r
-       if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_POINTER \r
-                             F_DWORD F_DWORD F_HANDLE,\r
-                             &lpFileName, &dwDesiredAccess, &dwShareMode, \r
-                             &lpSecurityAttributes, &dwCreationDisposition, \r
-                             &dwFlagsAndAttributes, &hTemplateFile))\r
-               return NULL;\r
-\r
-       Py_BEGIN_ALLOW_THREADS\r
-       handle = CreateFile(lpFileName, dwDesiredAccess, \r
-                           dwShareMode, lpSecurityAttributes, \r
-                           dwCreationDisposition, \r
-                           dwFlagsAndAttributes, hTemplateFile);\r
-       Py_END_ALLOW_THREADS\r
-\r
-       if (handle == INVALID_HANDLE_VALUE)\r
-               return PyErr_SetFromWindowsErr(0);\r
-\r
-       return Py_BuildValue(F_HANDLE, handle);\r
-}\r
-\r
-static PyObject *\r
-win32_CreateNamedPipe(PyObject *self, PyObject *args)\r
-{\r
-       LPCTSTR lpName;\r
-       DWORD dwOpenMode;\r
-       DWORD dwPipeMode;\r
-       DWORD nMaxInstances;\r
-       DWORD nOutBufferSize;\r
-       DWORD nInBufferSize;\r
-       DWORD nDefaultTimeOut;\r
-       LPSECURITY_ATTRIBUTES lpSecurityAttributes;\r
-       HANDLE handle;\r
-\r
-       if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_DWORD \r
-                             F_DWORD F_DWORD F_DWORD F_POINTER,\r
-                             &lpName, &dwOpenMode, &dwPipeMode, \r
-                             &nMaxInstances, &nOutBufferSize, \r
-                             &nInBufferSize, &nDefaultTimeOut,\r
-                             &lpSecurityAttributes))\r
-               return NULL;\r
-\r
-       Py_BEGIN_ALLOW_THREADS\r
-       handle = CreateNamedPipe(lpName, dwOpenMode, dwPipeMode, \r
-                                nMaxInstances, nOutBufferSize, \r
-                                nInBufferSize, nDefaultTimeOut,\r
-                                lpSecurityAttributes);\r
-       Py_END_ALLOW_THREADS\r
-\r
-       if (handle == INVALID_HANDLE_VALUE)\r
-               return PyErr_SetFromWindowsErr(0);\r
-\r
-       return Py_BuildValue(F_HANDLE, handle);\r
-}\r
-\r
-static PyObject *\r
-win32_ExitProcess(PyObject *self, PyObject *args)\r
-{\r
-       UINT uExitCode;\r
-\r
-       if (!PyArg_ParseTuple(args, "I", &uExitCode))\r
-               return NULL;\r
-\r
-       ExitProcess(uExitCode);\r
-\r
-       return NULL;\r
-}\r
-\r
-static PyObject *\r
-win32_GetLastError(PyObject *self, PyObject *args)\r
-{\r
-       return Py_BuildValue(F_DWORD, GetLastError());\r
-}\r
-\r
-static PyObject *\r
-win32_OpenProcess(PyObject *self, PyObject *args)\r
-{\r
-       DWORD dwDesiredAccess;\r
-       BOOL bInheritHandle;\r
-       DWORD dwProcessId;\r
-       HANDLE handle;\r
-\r
-       if (!PyArg_ParseTuple(args, F_DWORD "i" F_DWORD, \r
-                             &dwDesiredAccess, &bInheritHandle, &dwProcessId))\r
-               return NULL;\r
-\r
-       handle = OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId);    \r
-       if (handle == NULL)\r
-               return PyErr_SetFromWindowsErr(0);\r
-\r
-       return Py_BuildValue(F_HANDLE, handle);\r
-}\r
-\r
-static PyObject *\r
-win32_SetNamedPipeHandleState(PyObject *self, PyObject *args)\r
-{\r
-       HANDLE hNamedPipe;\r
-       PyObject *oArgs[3];\r
-       DWORD dwArgs[3], *pArgs[3] = {NULL, NULL, NULL};\r
-       int i;\r
-\r
-       if (!PyArg_ParseTuple(args, F_HANDLE "OOO", \r
-                             &hNamedPipe, &oArgs[0], &oArgs[1], &oArgs[2]))\r
-               return NULL;\r
-\r
-       PyErr_Clear();\r
-\r
-       for (i = 0 ; i < 3 ; i++) {\r
-               if (oArgs[i] != Py_None) {\r
-                       dwArgs[i] = PyInt_AsUnsignedLongMask(oArgs[i]);\r
-                       if (PyErr_Occurred())\r
-                               return NULL;\r
-                       pArgs[i] = &dwArgs[i];\r
-               }\r
-       }\r
-\r
-       if (!SetNamedPipeHandleState(hNamedPipe, pArgs[0], pArgs[1], pArgs[2]))\r
-               return PyErr_SetFromWindowsErr(0);\r
-\r
-       Py_RETURN_NONE;\r
-}\r
-\r
-static PyObject *\r
-win32_WaitNamedPipe(PyObject *self, PyObject *args)\r
-{\r
-       LPCTSTR lpNamedPipeName;\r
-       DWORD nTimeOut;\r
-       BOOL success;\r
-\r
-       if (!PyArg_ParseTuple(args, "s" F_DWORD, &lpNamedPipeName, &nTimeOut))\r
-               return NULL;\r
-\r
-       Py_BEGIN_ALLOW_THREADS\r
-       success = WaitNamedPipe(lpNamedPipeName, nTimeOut);\r
-       Py_END_ALLOW_THREADS\r
-\r
-       if (!success)\r
-               return PyErr_SetFromWindowsErr(0);\r
-\r
-       Py_RETURN_NONE;\r
-}\r
-\r
-static PyMethodDef win32_methods[] = {\r
-       WIN32_FUNCTION(CloseHandle),\r
-       WIN32_FUNCTION(GetLastError),\r
-       WIN32_FUNCTION(OpenProcess),\r
-       WIN32_FUNCTION(ExitProcess),\r
-       WIN32_FUNCTION(ConnectNamedPipe),\r
-       WIN32_FUNCTION(CreateFile),\r
-       WIN32_FUNCTION(CreateNamedPipe),\r
-       WIN32_FUNCTION(SetNamedPipeHandleState),\r
-       WIN32_FUNCTION(WaitNamedPipe),\r
-       {NULL}\r
-};\r
-\r
-\r
-PyTypeObject Win32Type = {\r
-       PyVarObject_HEAD_INIT(NULL, 0)\r
-};\r
-\r
-\r
-PyObject *\r
-create_win32_namespace(void)\r
-{\r
-       Win32Type.tp_name = "_multiprocessing.win32";\r
-       Win32Type.tp_methods = win32_methods;\r
-       if (PyType_Ready(&Win32Type) < 0)\r
-               return NULL;\r
-       Py_INCREF(&Win32Type);\r
-\r
-       WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS);\r
-       WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);\r
-       WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED);\r
-       WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);\r
-       WIN32_CONSTANT(F_DWORD, GENERIC_READ);\r
-       WIN32_CONSTANT(F_DWORD, GENERIC_WRITE);\r
-       WIN32_CONSTANT(F_DWORD, INFINITE);\r
-       WIN32_CONSTANT(F_DWORD, NMPWAIT_WAIT_FOREVER);\r
-       WIN32_CONSTANT(F_DWORD, OPEN_EXISTING);\r
-       WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_DUPLEX);\r
-       WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_INBOUND);\r
-       WIN32_CONSTANT(F_DWORD, PIPE_READMODE_MESSAGE);\r
-       WIN32_CONSTANT(F_DWORD, PIPE_TYPE_MESSAGE);\r
-       WIN32_CONSTANT(F_DWORD, PIPE_UNLIMITED_INSTANCES);\r
-       WIN32_CONSTANT(F_DWORD, PIPE_WAIT);\r
-       WIN32_CONSTANT(F_DWORD, PROCESS_ALL_ACCESS);\r
-\r
-       WIN32_CONSTANT("i", NULL);\r
-\r
-       return (PyObject*)&Win32Type;\r
-}\r
+/*
+ * Win32 functions used by multiprocessing package
+ *
+ * win32_functions.c
+ *
+ * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+ */
+
+#include "multiprocessing.h"
+
+
+#define WIN32_FUNCTION(func) \
+    {#func, (PyCFunction)win32_ ## func, METH_VARARGS | METH_STATIC, ""}
+
+#define WIN32_CONSTANT(fmt, con) \
+    PyDict_SetItemString(Win32Type.tp_dict, #con, Py_BuildValue(fmt, con))
+
+
+static PyObject *
+win32_CloseHandle(PyObject *self, PyObject *args)
+{
+       HANDLE hObject;
+       BOOL success;
+
+       if (!PyArg_ParseTuple(args, F_HANDLE, &hObject))
+               return NULL;
+
+       Py_BEGIN_ALLOW_THREADS
+       success = CloseHandle(hObject); 
+       Py_END_ALLOW_THREADS
+
+       if (!success)
+               return PyErr_SetFromWindowsErr(0);
+
+       Py_RETURN_NONE;
+}
+
+static PyObject *
+win32_ConnectNamedPipe(PyObject *self, PyObject *args)
+{
+       HANDLE hNamedPipe;
+       LPOVERLAPPED lpOverlapped;
+       BOOL success;
+
+       if (!PyArg_ParseTuple(args, F_HANDLE F_POINTER, 
+                             &hNamedPipe, &lpOverlapped))
+               return NULL;
+
+       Py_BEGIN_ALLOW_THREADS
+       success = ConnectNamedPipe(hNamedPipe, lpOverlapped);
+       Py_END_ALLOW_THREADS
+
+       if (!success)
+               return PyErr_SetFromWindowsErr(0);
+
+       Py_RETURN_NONE;
+}
+
+static PyObject *
+win32_CreateFile(PyObject *self, PyObject *args)
+{
+       LPCTSTR lpFileName;
+       DWORD dwDesiredAccess;
+       DWORD dwShareMode;
+       LPSECURITY_ATTRIBUTES lpSecurityAttributes;
+       DWORD dwCreationDisposition;
+       DWORD dwFlagsAndAttributes;
+       HANDLE hTemplateFile;
+       HANDLE handle;
+
+       if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_POINTER 
+                             F_DWORD F_DWORD F_HANDLE,
+                             &lpFileName, &dwDesiredAccess, &dwShareMode, 
+                             &lpSecurityAttributes, &dwCreationDisposition, 
+                             &dwFlagsAndAttributes, &hTemplateFile))
+               return NULL;
+
+       Py_BEGIN_ALLOW_THREADS
+       handle = CreateFile(lpFileName, dwDesiredAccess, 
+                           dwShareMode, lpSecurityAttributes, 
+                           dwCreationDisposition, 
+                           dwFlagsAndAttributes, hTemplateFile);
+       Py_END_ALLOW_THREADS
+
+       if (handle == INVALID_HANDLE_VALUE)
+               return PyErr_SetFromWindowsErr(0);
+
+       return Py_BuildValue(F_HANDLE, handle);
+}
+
+static PyObject *
+win32_CreateNamedPipe(PyObject *self, PyObject *args)
+{
+       LPCTSTR lpName;
+       DWORD dwOpenMode;
+       DWORD dwPipeMode;
+       DWORD nMaxInstances;
+       DWORD nOutBufferSize;
+       DWORD nInBufferSize;
+       DWORD nDefaultTimeOut;
+       LPSECURITY_ATTRIBUTES lpSecurityAttributes;
+       HANDLE handle;
+
+       if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_DWORD 
+                             F_DWORD F_DWORD F_DWORD F_POINTER,
+                             &lpName, &dwOpenMode, &dwPipeMode, 
+                             &nMaxInstances, &nOutBufferSize, 
+                             &nInBufferSize, &nDefaultTimeOut,
+                             &lpSecurityAttributes))
+               return NULL;
+
+       Py_BEGIN_ALLOW_THREADS
+       handle = CreateNamedPipe(lpName, dwOpenMode, dwPipeMode, 
+                                nMaxInstances, nOutBufferSize, 
+                                nInBufferSize, nDefaultTimeOut,
+                                lpSecurityAttributes);
+       Py_END_ALLOW_THREADS
+
+       if (handle == INVALID_HANDLE_VALUE)
+               return PyErr_SetFromWindowsErr(0);
+
+       return Py_BuildValue(F_HANDLE, handle);
+}
+
+static PyObject *
+win32_ExitProcess(PyObject *self, PyObject *args)
+{
+       UINT uExitCode;
+
+       if (!PyArg_ParseTuple(args, "I", &uExitCode))
+               return NULL;
+
+       ExitProcess(uExitCode);
+
+       return NULL;
+}
+
+static PyObject *
+win32_GetLastError(PyObject *self, PyObject *args)
+{
+       return Py_BuildValue(F_DWORD, GetLastError());
+}
+
+static PyObject *
+win32_OpenProcess(PyObject *self, PyObject *args)
+{
+       DWORD dwDesiredAccess;
+       BOOL bInheritHandle;
+       DWORD dwProcessId;
+       HANDLE handle;
+
+       if (!PyArg_ParseTuple(args, F_DWORD "i" F_DWORD, 
+                             &dwDesiredAccess, &bInheritHandle, &dwProcessId))
+               return NULL;
+
+       handle = OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId);    
+       if (handle == NULL)
+               return PyErr_SetFromWindowsErr(0);
+
+       return Py_BuildValue(F_HANDLE, handle);
+}
+
+static PyObject *
+win32_SetNamedPipeHandleState(PyObject *self, PyObject *args)
+{
+       HANDLE hNamedPipe;
+       PyObject *oArgs[3];
+       DWORD dwArgs[3], *pArgs[3] = {NULL, NULL, NULL};
+       int i;
+
+       if (!PyArg_ParseTuple(args, F_HANDLE "OOO", 
+                             &hNamedPipe, &oArgs[0], &oArgs[1], &oArgs[2]))
+               return NULL;
+
+       PyErr_Clear();
+
+       for (i = 0 ; i < 3 ; i++) {
+               if (oArgs[i] != Py_None) {
+                       dwArgs[i] = PyInt_AsUnsignedLongMask(oArgs[i]);
+                       if (PyErr_Occurred())
+                               return NULL;
+                       pArgs[i] = &dwArgs[i];
+               }
+       }
+
+       if (!SetNamedPipeHandleState(hNamedPipe, pArgs[0], pArgs[1], pArgs[2]))
+               return PyErr_SetFromWindowsErr(0);
+
+       Py_RETURN_NONE;
+}
+
+static PyObject *
+win32_WaitNamedPipe(PyObject *self, PyObject *args)
+{
+       LPCTSTR lpNamedPipeName;
+       DWORD nTimeOut;
+       BOOL success;
+
+       if (!PyArg_ParseTuple(args, "s" F_DWORD, &lpNamedPipeName, &nTimeOut))
+               return NULL;
+
+       Py_BEGIN_ALLOW_THREADS
+       success = WaitNamedPipe(lpNamedPipeName, nTimeOut);
+       Py_END_ALLOW_THREADS
+
+       if (!success)
+               return PyErr_SetFromWindowsErr(0);
+
+       Py_RETURN_NONE;
+}
+
+static PyMethodDef win32_methods[] = {
+       WIN32_FUNCTION(CloseHandle),
+       WIN32_FUNCTION(GetLastError),
+       WIN32_FUNCTION(OpenProcess),
+       WIN32_FUNCTION(ExitProcess),
+       WIN32_FUNCTION(ConnectNamedPipe),
+       WIN32_FUNCTION(CreateFile),
+       WIN32_FUNCTION(CreateNamedPipe),
+       WIN32_FUNCTION(SetNamedPipeHandleState),
+       WIN32_FUNCTION(WaitNamedPipe),
+       {NULL}
+};
+
+
+PyTypeObject Win32Type = {
+       PyVarObject_HEAD_INIT(NULL, 0)
+};
+
+
+PyObject *
+create_win32_namespace(void)
+{
+       Win32Type.tp_name = "_multiprocessing.win32";
+       Win32Type.tp_methods = win32_methods;
+       if (PyType_Ready(&Win32Type) < 0)
+               return NULL;
+       Py_INCREF(&Win32Type);
+
+       WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS);
+       WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);
+       WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED);
+       WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);
+       WIN32_CONSTANT(F_DWORD, GENERIC_READ);
+       WIN32_CONSTANT(F_DWORD, GENERIC_WRITE);
+       WIN32_CONSTANT(F_DWORD, INFINITE);
+       WIN32_CONSTANT(F_DWORD, NMPWAIT_WAIT_FOREVER);
+       WIN32_CONSTANT(F_DWORD, OPEN_EXISTING);
+       WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_DUPLEX);
+       WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_INBOUND);
+       WIN32_CONSTANT(F_DWORD, PIPE_READMODE_MESSAGE);
+       WIN32_CONSTANT(F_DWORD, PIPE_TYPE_MESSAGE);
+       WIN32_CONSTANT(F_DWORD, PIPE_UNLIMITED_INSTANCES);
+       WIN32_CONSTANT(F_DWORD, PIPE_WAIT);
+       WIN32_CONSTANT(F_DWORD, PROCESS_ALL_ACCESS);
+
+       WIN32_CONSTANT("i", NULL);
+
+       return (PyObject*)&Win32Type;
+}