Skip to content
This repository has been archived by the owner on Nov 30, 2019. It is now read-only.

Commit

Permalink
Remove unused method
Browse files Browse the repository at this point in the history
  • Loading branch information
Yandu Oppacher committed Nov 14, 2014
1 parent 76a6c37 commit 0864b5d
Showing 1 changed file with 0 additions and 11 deletions.
11 changes: 0 additions & 11 deletions python/pyspark/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
import sys
from threading import Lock
from tempfile import NamedTemporaryFile
import atexit

from pyspark import accumulators
from pyspark.accumulators import Accumulator
Expand Down Expand Up @@ -813,16 +812,6 @@ def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
it = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, javaPartitions, allowLocal)
return list(mappedRDD._collect_iterator_through_file(it))

def _add_profiler(self, id, profiler):
    """Record *profiler* for the RDD with the given *id*.

    On the very first registration, an ``atexit`` hook is installed so the
    accumulated profiles are reported when the interpreter exits: dumped to
    the path in ``spark.python.profile.dump`` when that conf is set,
    otherwise printed via ``show_profiles``.
    """
    first_profiler = not self._profile_stats
    if first_profiler:
        # Install the exit hook exactly once, before the first entry is stored.
        dump_path = self._conf.get("spark.python.profile.dump")
        if dump_path:
            atexit.register(self.dump_profiles, dump_path)
        else:
            atexit.register(self.show_profiles)

    # Third field tracks whether this entry has been reported yet.
    self._profile_stats.append([id, profiler, False])

def show_profiles(self):
    """Print the collected profile stats by delegating to the profiler collector."""
    collector = self.profiler_collector
    collector.show_profiles()

Expand Down

0 comments on commit 0864b5d

Please sign in to comment.