forked from apache/spark
-
Notifications
You must be signed in to change notification settings - Fork 8
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[SPARK-46687][PYTHON][CONNECT] Basic support of SparkSession-based me…
…mory profiler ### What changes were proposed in this pull request? Basic support of SparkSession-based memory profiler in both Spark Connect and non-Spark-Connect. ### Why are the changes needed? We need to make the memory profiler SparkSession-based to support memory profiling in Spark Connect. ### Does this PR introduce _any_ user-facing change? Yes, the SparkSession-based memory profiler is available. An example is as shown below ```py from pyspark.sql import SparkSession from pyspark.sql.functions import * from pyspark.taskcontext import TaskContext spark.conf.set("spark.sql.pyspark.udf.profiler", "memory") udf("string") def f(x): if TaskContext.get().partitionId() % 2 == 0: return str(x) else: return None spark.range(10).select(f(col("id"))).show() spark.showMemoryProfiles() ``` shows profile result: ``` ============================================================ Profile of UDF<id=2> ============================================================ Filename: /var/folders/h_/60n1p_5s7751jx1st4_sk0780000gp/T/ipykernel_72839/2848225169.py Line # Mem usage Increment Occurrences Line Contents ============================================================= 7 113.2 MiB 113.2 MiB 10 udf("string") 8 def f(x): 9 114.4 MiB 1.3 MiB 10 if TaskContext.get().partitionId() % 2 == 0: 10 31.8 MiB 0.1 MiB 4 return str(x) 11 else: 12 82.8 MiB 0.1 MiB 6 return None ``` ### How was this patch tested? New and existing unit tests: - pyspark.tests.test_memory_profiler - pyspark.sql.tests.connect.test_parity_memory_profiler And manual tests on Jupyter notebook. ### Was this patch authored or co-authored using generative AI tooling? No. Closes apache#44775 from xinrong-meng/memory_profiler_v2. Authored-by: Xinrong Meng <xinrong@apache.org> Signed-off-by: Takuya UESHIN <ueshin@databricks.com>
- Loading branch information
1 parent
c468c3d
commit 528ac8b
Showing
7 changed files
with
368 additions
and
25 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
59 changes: 59 additions & 0 deletions
59
python/pyspark/sql/tests/connect/test_parity_memory_profiler.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,59 @@ | ||
# | ||
# Licensed to the Apache Software Foundation (ASF) under one or more | ||
# contributor license agreements. See the NOTICE file distributed with | ||
# this work for additional information regarding copyright ownership. | ||
# The ASF licenses this file to You under the Apache License, Version 2.0 | ||
# (the "License"); you may not use this file except in compliance with | ||
# the License. You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
# | ||
import inspect | ||
import os | ||
import unittest | ||
|
||
from pyspark.tests.test_memory_profiler import MemoryProfiler2TestsMixin, _do_computation | ||
from pyspark.testing.connectutils import ReusedConnectTestCase | ||
|
||
|
||
class MemoryProfilerParityTests(MemoryProfiler2TestsMixin, ReusedConnectTestCase):
    """Run the SparkSession-based memory profiler test suite against Spark Connect.

    Inherits the shared test cases from ``MemoryProfiler2TestsMixin`` and executes
    them on a Spark Connect session provided by ``ReusedConnectTestCase``.
    """

    def setUp(self) -> None:
        super().setUp()
        # Clear any profile results accumulated by earlier tests so each test
        # starts from an empty collector.
        self.spark._profiler_collector._value = None

    def test_memory_profiler_udf_multiple_actions(self):
        """Profile results should accumulate across multiple actions on one DataFrame."""

        def action(df):
            df.collect()
            df.show()

        with self.sql_conf({"spark.sql.pyspark.udf.profiler": "memory"}):
            _do_computation(self.spark, action=action)

        # Two actions over the UDFs in _do_computation are expected to yield
        # exactly 6 profile entries — presumably 3 UDFs x 2 actions; confirm
        # against _do_computation if the fixture changes.
        self.assertEqual(6, len(self.profile_results), str(list(self.profile_results)))

        # NOTE: renamed loop variable from `id` to `result_id` to avoid
        # shadowing the builtin id().
        for result_id in self.profile_results:
            with self.trap_stdout() as io:
                self.spark.showMemoryProfiles(result_id)

            self.assertIn(f"Profile of UDF<id={result_id}>", io.getvalue())
            self.assertRegex(
                io.getvalue(), f"Filename.*{os.path.basename(inspect.getfile(_do_computation))}"
            )
|
||
|
||
if __name__ == "__main__":
    # Re-export the test cases at module scope so unittest discovery finds them.
    from pyspark.sql.tests.connect.test_parity_memory_profiler import *  # noqa: F401

    # Prefer the XML-emitting runner when available (CI report collection);
    # otherwise fall back to the default text runner.
    try:
        import xmlrunner  # type: ignore[import]
    except ImportError:
        testRunner = None
    else:
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)

    unittest.main(testRunner=testRunner, verbosity=2)
Oops, something went wrong.