diff --git a/python/docs/source/development/logger.rst b/python/docs/source/development/logger.rst index d809dbf728508..875f55f8c032d 100644 --- a/python/docs/source/development/logger.rst +++ b/python/docs/source/development/logger.rst @@ -50,7 +50,7 @@ Example log entry: "msg": "[DIVIDE_BY_ZERO] Division by zero. Use `try_divide` to tolerate divisor being 0 and return NULL instead. If necessary set \"spark.sql.ansi.enabled\" to \"false\" to bypass this error. SQLSTATE: 22012\n== DataFrame ==\n\"divide\" was called from\n/.../spark/python/test_error_context.py:17\n", "context": { "file": "/path/to/file.py", - "line_no": "17", - "fragment": "divide" + "line": "17", + "fragment": "divide", "error_class": "DIVIDE_BY_ZERO" }, diff --git a/python/pyspark/errors/exceptions/base.py b/python/pyspark/errors/exceptions/base.py index e33492fbe15ed..a21ad7f0bb70f 100644 --- a/python/pyspark/errors/exceptions/base.py +++ b/python/pyspark/errors/exceptions/base.py @@ -137,11 +137,11 @@ def _log_exception(self) -> None: if query_context.contextType().name == "DataFrame": logger = PySparkLogger.getLogger("DataFrameQueryContextLogger") call_site = query_context.callSite().split(":") - line_no = call_site[1] if len(call_site) == 2 else "" + line = call_site[1] if len(call_site) == 2 else "" logger.exception( self.getMessage(), file=call_site[0], - line_no=line_no, + line=line, fragment=query_context.fragment(), error_class=self.getErrorClass(), )