path: root/python/pyspark/profiler.py
author     Yandu Oppacher <yandu.oppacher@jadedpixel.com>  2015-01-28 13:48:06 -0800
committer  Josh Rosen <joshrosen@databricks.com>  2015-01-28 13:48:06 -0800
commit     3bead67d5926a2a798ca0e2bc71e747380493787 (patch)
tree       ce36d8e926702f8da17b9f43c49e1682d1e9fccb /python/pyspark/profiler.py
parent     a731314c319a6f265060e05267844069027804fd (diff)
download   spark-3bead67d5926a2a798ca0e2bc71e747380493787.tar.gz
           spark-3bead67d5926a2a798ca0e2bc71e747380493787.tar.bz2
           spark-3bead67d5926a2a798ca0e2bc71e747380493787.zip
[SPARK-4387][PySpark] Refactoring python profiling code to make it extensible
This PR is based on #3255; it fixes the conflicts and code style. Closes #3255.

Author: Yandu Oppacher <yandu.oppacher@jadedpixel.com>
Author: Davies Liu <davies@databricks.com>

Closes #3901 from davies/refactor-python-profile-code and squashes the following commits:

b4a9306 [Davies Liu] fix tests
4b79ce8 [Davies Liu] add docstring for profiler_cls
2700e47 [Davies Liu] use BasicProfiler as default
349e341 [Davies Liu] more refactor
6a5d4df [Davies Liu] refactor and fix tests
31bf6b6 [Davies Liu] fix code style
0864b5d [Yandu Oppacher] Remove unused method
76a6c37 [Yandu Oppacher] Added a profile collector to accumulate the profilers per stage
9eefc36 [Yandu Oppacher] Fix doc
9ace076 [Yandu Oppacher] Refactor of profiler, and moved tests around
8739aff [Yandu Oppacher] Code review fixes
9bda3ec [Yandu Oppacher] Refactor profiler code
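For context, the refactored API is driven through SparkConf plus the new profiler_cls argument to SparkContext, as the doctest in the added file shows. A minimal sketch of exercising it end to end (the app name is illustrative; assumes a build that includes this patch):

    from pyspark import SparkConf, SparkContext, BasicProfiler

    # Enable Python-side profiling; with BasicProfiler, each partition is
    # profiled with cProfile and the pstats.Stats objects are merged on
    # the driver through an accumulator.
    conf = SparkConf().set("spark.python.profile", "true")
    sc = SparkContext("local", "profile-demo", conf=conf,
                      profiler_cls=BasicProfiler)

    sc.parallelize(range(1000)).map(lambda x: 2 * x).count()

    sc.show_profiles()  # print accumulated stats for each profiled RDD
    sc.stop()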
Diffstat (limited to 'python/pyspark/profiler.py')
-rw-r--r--  python/pyspark/profiler.py  172
1 file changed, 172 insertions, 0 deletions
diff --git a/python/pyspark/profiler.py b/python/pyspark/profiler.py
new file mode 100644
index 0000000000..4408996db0
--- /dev/null
+++ b/python/pyspark/profiler.py
@@ -0,0 +1,172 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import cProfile
+import pstats
+import os
+import atexit
+
+from pyspark.accumulators import AccumulatorParam
+
+
+class ProfilerCollector(object):
+ """
+    This class keeps track of different profilers on a per-stage
+    basis, and is also used to create new profilers for the
+    different stages.
+ """
+
+ def __init__(self, profiler_cls, dump_path=None):
+ self.profiler_cls = profiler_cls
+ self.profile_dump_path = dump_path
+ self.profilers = []
+
+ def new_profiler(self, ctx):
+ """ Create a new profiler using class `profiler_cls` """
+ return self.profiler_cls(ctx)
+
+ def add_profiler(self, id, profiler):
+ """ Add a profiler for RDD `id` """
+ if not self.profilers:
+ if self.profile_dump_path:
+ atexit.register(self.dump_profiles, self.profile_dump_path)
+ else:
+ atexit.register(self.show_profiles)
+
+ self.profilers.append([id, profiler, False])
+
+ def dump_profiles(self, path):
+ """ Dump the profile stats into directory `path` """
+ for id, profiler, _ in self.profilers:
+ profiler.dump(id, path)
+ self.profilers = []
+
+ def show_profiles(self):
+ """ Print the profile stats to stdout """
+ for i, (id, profiler, showed) in enumerate(self.profilers):
+ if not showed and profiler:
+ profiler.show(id)
+                # mark it as shown
+ self.profilers[i][2] = True
+
+
+class Profiler(object):
+ """
+ .. note:: DeveloperApi
+
+    PySpark supports custom profilers so that different profilers can
+    be plugged in, and so that output can be produced in formats other
+    than those provided by the BasicProfiler.
+
+    A custom profiler has to define or inherit the following methods:
+        profile - produces a system profile of some sort.
+        stats - returns the collected stats.
+        dump - dumps the profile stats to a path.
+        add - adds a profile to the existing accumulated profile.
+
+    The profiler class is chosen when creating a SparkContext.
+
+ >>> from pyspark import SparkConf, SparkContext
+ >>> from pyspark import BasicProfiler
+ >>> class MyCustomProfiler(BasicProfiler):
+ ... def show(self, id):
+ ... print "My custom profiles for RDD:%s" % id
+ ...
+ >>> conf = SparkConf().set("spark.python.profile", "true")
+ >>> sc = SparkContext('local', 'test', conf=conf, profiler_cls=MyCustomProfiler)
+ >>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
+ [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
+ >>> sc.show_profiles()
+ My custom profiles for RDD:1
+ My custom profiles for RDD:2
+ >>> sc.stop()
+ """
+
+ def __init__(self, ctx):
+ pass
+
+ def profile(self, func):
+        """ Do profiling on the function `func` """
+        raise NotImplementedError
+
+ def stats(self):
+        """ Return the collected profiling stats (pstats.Stats) """
+        raise NotImplementedError
+
+ def show(self, id):
+        """ Print the profile stats to stdout; `id` is the RDD id """
+ stats = self.stats()
+ if stats:
+ print "=" * 60
+ print "Profile of RDD<id=%d>" % id
+ print "=" * 60
+ stats.sort_stats("time", "cumulative").print_stats()
+
+ def dump(self, id, path):
+        """ Dump the profile stats into directory `path`; `id` is the RDD id """
+ if not os.path.exists(path):
+ os.makedirs(path)
+ stats = self.stats()
+ if stats:
+ p = os.path.join(path, "rdd_%d.pstats" % id)
+ stats.dump_stats(p)
+
+
+class PStatsParam(AccumulatorParam):
+ """PStatsParam is used to merge pstats.Stats"""
+
+ @staticmethod
+ def zero(value):
+ return None
+
+ @staticmethod
+ def addInPlace(value1, value2):
+ if value1 is None:
+ return value2
+ value1.add(value2)
+ return value1
+
+
+class BasicProfiler(Profiler):
+ """
+    BasicProfiler is the default profiler; it is implemented on top of
+    cProfile and Accumulator.
+ """
+ def __init__(self, ctx):
+ Profiler.__init__(self, ctx)
+ # Creates a new accumulator for combining the profiles of different
+ # partitions of a stage
+ self._accumulator = ctx.accumulator(None, PStatsParam)
+
+ def profile(self, func):
+        """ Runs and profiles the function `func` passed in; the collected stats are added to the accumulator. """
+ pr = cProfile.Profile()
+ pr.runcall(func)
+ st = pstats.Stats(pr)
+ st.stream = None # make it picklable
+ st.strip_dirs()
+
+ # Adds a new profile to the existing accumulated value
+ self._accumulator.add(st)
+
+ def stats(self):
+ return self._accumulator.value
+
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
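The extension point the Profiler docstring describes is plain subclassing: override show or dump on BasicProfiler and hand the class to SparkContext via profiler_cls. An illustrative sketch (JSONProfiler and its output format are hypothetical, not part of this patch):

    import json

    from pyspark import BasicProfiler

    class JSONProfiler(BasicProfiler):
        """ Hypothetical profiler that reports per-RDD totals as JSON. """

        def show(self, id):
            stats = self.stats()
            if stats:
                # total_tt is the total time tracked by pstats.Stats
                print json.dumps({"rdd_id": id, "total_time": stats.total_tt})

    # Used like any other profiler:
    #     sc = SparkContext('local', 'test', conf=conf, profiler_cls=JSONProfiler)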