aboutsummaryrefslogtreecommitdiff
path: root/python/pyspark/daemon.py
blob: ab9c19df578f8a6053cc8764082e53ffa34c25de (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import os
import sys
import multiprocessing
from errno import EINTR, ECHILD
from socket import socket, AF_INET, SOCK_STREAM, SOMAXCONN
from signal import signal, SIGHUP, SIGTERM, SIGCHLD, SIG_DFL, SIG_IGN
from pyspark.worker import main as worker_main
from pyspark.serializers import write_int

# Size of the initial worker pool: one worker per CPU, falling back to 4
# when the platform cannot report a CPU count.
try:
    POOLSIZE = multiprocessing.cpu_count()
except NotImplementedError:
    POOLSIZE = 4

# Process-shared flag; set to request shutdown of the manager and of every
# worker in the pool (workers inherit it across fork()).
should_exit = multiprocessing.Event()


def worker(listen_sock):
    """Pool-worker loop: accept clients on listen_sock and fork a process
    to serve each one.

    Runs inside a child forked by manager(); never returns normally --
    every exit path goes through os._exit().
    """
    # Redirect stdout to stderr so nothing a worker prints can corrupt the
    # stream the manager uses for its own protocol.
    os.dup2(2, 1)

    # Manager sends SIGHUP to request termination of workers in the pool
    def handle_sighup(signum, frame):
        # SIGHUP is only sent after should_exit is set (see manager());
        # the signal's purpose is to interrupt the blocking accept() below
        # so the loop condition is re-checked.
        assert should_exit.is_set()
    signal(SIGHUP, handle_sighup)

    while not should_exit.is_set():
        # Wait until a client arrives or we have to exit
        sock = None
        while not should_exit.is_set() and sock is None:
            try:
                sock, addr = listen_sock.accept()
            except EnvironmentError as err:
                # accept() was interrupted by a signal (e.g. SIGHUP);
                # loop around and re-check should_exit.
                if err.errno != EINTR:
                    raise

        if sock is not None:
            # Fork to handle the client.  NOTE: the roles are inverted
            # from the usual pattern -- the PARENT leaves the pool to
            # serve the client and then exits, while the CHILD stays
            # behind as the replacement pool worker.
            if os.fork() != 0:
                # Leave the worker pool
                signal(SIGHUP, SIG_DFL)
                listen_sock.close()
                # Handle the client then exit
                sockfile = sock.makefile()
                worker_main(sockfile, sockfile)
                sockfile.close()
                sock.close()
                os._exit(0)
            else:
                # Child: the parent owns the client socket; drop our copy
                # and go back to accepting connections.
                sock.close()

    # Only reachable when shutdown was requested.
    assert should_exit.is_set()
    os._exit(0)


def manager():
    """Run the daemon: fork a pool of workers, then block until shutdown.

    Binds a loopback socket, writes its port to stdout (for the parent
    process to read), forks POOLSIZE workers that accept on it, and then
    waits.  The parent requests shutdown by closing our stdin or sending
    SIGTERM; on shutdown every process in our group gets SIGHUP.
    """
    # Create a new process group to corral our children; os.kill(0, ...)
    # below then signals the whole group at once.
    os.setpgid(0, 0)

    # Create a listening socket on the AF_INET loopback interface.
    # Port 0 lets the OS pick a free port, which we report on stdout.
    listen_sock = socket(AF_INET, SOCK_STREAM)
    listen_sock.bind(('127.0.0.1', 0))
    listen_sock.listen(max(1024, 2 * POOLSIZE, SOMAXCONN))
    listen_host, listen_port = listen_sock.getsockname()
    write_int(listen_port, sys.stdout)

    # Launch initial worker pool; each child runs worker() forever.
    for idx in range(POOLSIZE):
        if os.fork() == 0:
            worker(listen_sock)
            raise RuntimeError("worker() unexpectedly returned")
    listen_sock.close()

    def shutdown():
        should_exit.set()

    # Gracefully exit on SIGTERM, don't die on SIGHUP
    signal(SIGTERM, lambda signum, frame: shutdown())
    signal(SIGHUP, SIG_IGN)

    # Cleanup zombie children.  Reap in a loop: multiple child exits can
    # coalesce into a single SIGCHLD delivery, so a single waitpid() per
    # signal could leave zombies behind.
    def handle_sigchld(signum, frame):
        try:
            while True:
                pid, status = os.waitpid(0, os.WNOHANG)
                if pid == 0:
                    # Children exist but none have exited yet.
                    break
                if status != 0 and not should_exit.is_set():
                    raise RuntimeError("worker crashed: %s, %s" % (pid, status))
        except EnvironmentError as err:
            # ECHILD: no children left; EINTR: interrupted -- both benign.
            if err.errno not in (ECHILD, EINTR):
                raise
    signal(SIGCHLD, handle_sigchld)

    # Initialization complete
    sys.stdout.close()
    try:
        while not should_exit.is_set():
            try:
                # Spark tells us to exit by closing stdin.  os.read()
                # returns bytes on Python 3, so compare against b''
                # (identical to '' on Python 2); comparing with '' would
                # never match on Python 3 and shutdown would never fire.
                if os.read(0, 512) == b'':
                    shutdown()
            except EnvironmentError as err:
                if err.errno != EINTR:
                    shutdown()
                    raise
    finally:
        should_exit.set()
        # Send SIGHUP to notify workers of shutdown
        os.kill(0, SIGHUP)


if __name__ == '__main__':
    # Entry point when executed as a script: run the daemon loop.
    manager()