SparkSession-based UDF profiler.

GitHub Actions / Report test results failed Dec 20, 2023 in 0s

46171 tests run, 899 skipped, 14 failed.

Annotations

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_column_order

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/reattach.py", line 157, in _has_next
    self._current = self._call_iter(
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/reattach.py", line 271, in _call_iter
    raise e
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/reattach.py", line 254, in _call_iter
    return iter_fun()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/reattach.py", line 158, in <lambda>
    lambda: next(self._iterator)  # type: ignore[arg-type]
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 541, in __next__
    return self._next()
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 967, in _next
    raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {created_time:"2023-12-20T00:03:35.265339669+00:00", grpc_status:14}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/testing/sqlutils.py", line 185, in sql_conf
    yield
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 630, in check_column_order
    grouped_df.apply(column_name_typo).collect()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 1755, in collect
    table, schema = self._to_table()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 1767, in _to_table
    table, schema = self._session.client.to_table(query, self._plan.observations)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 810, in to_table
    table, schema, _, _, _ = self._execute_and_fetch(req, observations)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1290, in _execute_and_fetch
    for response in self._execute_and_fetch_as_iterator(req, observations):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1268, in _execute_and_fetch_as_iterator
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1260, in _execute_and_fetch_as_iterator
    for b in generator:
  File "/usr/lib/python3.9/_collections_abc.py", line 330, in __next__
    return self.send(None)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/reattach.py", line 133, in send
    if not self._has_next():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/reattach.py", line 185, in _has_next
    raise e
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/reattach.py", line 153, in _has_next
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1377, in config
    resp = self._stub.Config(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {grpc_status:14, created_time:"2023-12-20T00:13:46.598188071+00:00"}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py", line 45, in test_column_order
    self.check_column_order()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 632, in check_column_order
    grouped_df.apply(invalid_positional_types).collect()
  File "/usr/lib/python3.9/contextlib.py", line 135, in __exit__
    self.gen.throw(type, value, traceback)
  File "/__w/apache-spark/apache-spark/python/pyspark/testing/sqlutils.py", line 189, in sql_conf
    self.spark.conf.unset(key)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/conf.py", line 74, in unset
    result = self._client.config(operation)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1382, in config
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1375, in config
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
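Note (not part of the report): all of the Python failures below follow the same pattern as this one — the Spark Connect server at 127.0.0.1:44641 has gone away, every gRPC call fails with StatusCode.UNAVAILABLE, and the client raises RetriesExceeded once its retry policy is exhausted. A minimal sketch of that failure mode, assuming a Spark Connect build of PySpark and nothing listening on the port taken from the log:

# Minimal reproduction sketch (assumption: no server is listening on 127.0.0.1:44641).
from pyspark.sql import SparkSession
from pyspark.errors.exceptions.base import RetriesExceeded  # class shown in the tracebacks above

try:
    # The first RPC against the dead endpoint is retried on UNAVAILABLE until the
    # client's retry policy gives up and raises RetriesExceeded.
    spark = SparkSession.builder.remote("sc://127.0.0.1:44641").getOrCreate()
    spark.range(1).collect()
except RetriesExceeded as exc:
    print(f"Spark Connect server unreachable: {exc}")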

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_complex_groupby

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1101, in _analyze
    resp = self._stub.AnalyzePlan(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {created_time:"2023-12-20T00:23:57.339860714+00:00", grpc_status:14}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 259, in test_complex_groupby
    result = df.groupby(col("id") % 2 == 0).apply(normalize).sort("id", "v").toPandas()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/group.py", line 287, in apply
    return self.applyInPandas(udf.func, schema=udf.returnType)  # type: ignore[attr-defined]
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/group.py", line 308, in applyInPandas
    cols=self._df.columns,
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 248, in columns
    return self.schema.names
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 1780, in schema
    return self._session.client.schema(query)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 920, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1106, in _analyze
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1099, in _analyze
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_datatype_string

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1101, in _analyze
    resp = self._stub.AnalyzePlan(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {created_time:"2023-12-20T00:34:09.457584953+00:00", grpc_status:14}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 399, in test_datatype_string
    result = df.groupby("id").apply(foo_udf).sort("id").toPandas()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 492, in groupBy
    _cols.append(self[c])
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1106, in _analyze
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1099, in _analyze
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_decorator

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1101, in _analyze
    resp = self._stub.AnalyzePlan(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {created_time:"2023-12-20T00:44:21.663965325+00:00", grpc_status:14}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 237, in test_decorator
    result = df.groupby("id").apply(foo).sort("id").toPandas()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 492, in groupBy
    _cols.append(self[c])
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 1724, in __getitem__
    self.select(item).isLocal()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 1786, in isLocal
    result = self._session.client._analyze(method="is_local", plan=query).is_local
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1106, in _analyze
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1099, in _analyze
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_empty_groupby

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1101, in _analyze
    resp = self._stub.AnalyzePlan(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {grpc_status:14, created_time:"2023-12-20T00:54:33.698281061+00:00"}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 274, in test_empty_groupby
    result = df.groupby().apply(normalize).sort("id", "v").toPandas()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/group.py", line 287, in apply
    return self.applyInPandas(udf.func, schema=udf.returnType)  # type: ignore[attr-defined]
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/group.py", line 308, in applyInPandas
    cols=self._df.columns,
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 248, in columns
    return self.schema.names
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 1780, in schema
    return self._session.client.schema(query)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 920, in schema
    schema = self._analyze(method="schema", plan=plan).schema
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1106, in _analyze
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1099, in _analyze
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_grouped_over_window

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1377, in config
    resp = self._stub.Config(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {created_time:"2023-12-20T01:04:45.884342786+00:00", grpc_status:14}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 705, in test_grouped_over_window
    df = self.spark.createDataFrame(data, ["id", "group", "ts", "result"])
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/session.py", line 515, in createDataFrame
    _schema = self._inferSchemaFromList(_data, _cols)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/session.py", line 322, in _inferSchemaFromList
    ) = self._client.get_configs(
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1345, in get_configs
    configs = dict(self.config(op).pairs)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1382, in config
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1375, in config
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_grouped_over_window_with_key

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1377, in config
    resp = self._stub.Config(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {grpc_status:14, created_time:"2023-12-20T01:14:57.629706942+00:00"}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 734, in test_grouped_over_window_with_key
    timezone = self.spark.conf.get("spark.sql.session.timeZone")
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/conf.py", line 66, in get
    result = self._client.config(operation)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1382, in config
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1375, in config
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_mixed_scalar_udfs_followed_by_groupby_apply

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1377, in config
    resp = self._stub.Config(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {grpc_status:14, created_time:"2023-12-20T01:25:10.580174491+00:00"}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 667, in test_mixed_scalar_udfs_followed_by_groupby_apply
    df = df.withColumn("v2", udf(lambda x: x + 1, "int")(df["v1"])).withColumn(
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/utils.py", line 192, in wrapped
    return getattr(functions, f.__name__)(*args, **kwargs)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/functions/builtin.py", line 3968, in udf
    return _create_py_udf(f=f, returnType=returnType, useArrow=useArrow)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/udf.py", line 66, in _create_py_udf
    str(session.conf.get("spark.sql.execution.pythonUDF.arrow.enabled")).lower()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/conf.py", line 66, in get
    result = self._client.config(operation)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1382, in config
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1375, in config
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_positional_assignment_conf

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1377, in config
    resp = self._stub.Config(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {grpc_status:14, created_time:"2023-12-20T01:35:22.338899535+00:00"}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 635, in test_positional_assignment_conf
    with self.sql_conf(
  File "/usr/lib/python3.9/contextlib.py", line 117, in __enter__
    return next(self.gen)
  File "/__w/apache-spark/apache-spark/python/pyspark/testing/sqlutils.py", line 181, in sql_conf
    old_values = [self.spark.conf.get(key, None) for key in keys]
  File "/__w/apache-spark/apache-spark/python/pyspark/testing/sqlutils.py", line 181, in <listcomp>
    old_values = [self.spark.conf.get(key, None) for key in keys]
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/conf.py", line 66, in get
    result = self._client.config(operation)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1382, in config
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1375, in config
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_self_join_with_pandas

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1377, in config
    resp = self._stub.Config(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {grpc_status:14, created_time:"2023-12-20T01:45:33.509909902+00:00"}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 654, in test_self_join_with_pandas
    df = self.spark.createDataFrame(
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/session.py", line 515, in createDataFrame
    _schema = self._inferSchemaFromList(_data, _cols)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/session.py", line 322, in _inferSchemaFromList
    ) = self._client.get_configs(
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1345, in get_configs
    configs = dict(self.config(op).pairs)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1382, in config
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1375, in config
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_timestamp_dst

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1101, in _analyze
    resp = self._stub.AnalyzePlan(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {created_time:"2023-12-20T01:55:44.98835263+00:00", grpc_status:14}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 468, in test_timestamp_dst
    df = self.spark.createDataFrame(dt, "timestamp").toDF("time")
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/session.py", line 358, in createDataFrame
    schema = self.client._analyze(  # type: ignore[assignment]
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1106, in _analyze
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1099, in _analyze
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.test_udf_with_key

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1377, in config
    resp = self._stub.Config(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {grpc_status:14, created_time:"2023-12-20T02:05:56.857049802+00:00"}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/tests/pandas/test_pandas_grouped_map.py", line 477, in test_udf_with_key
    pdf = df.toPandas()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/dataframe.py", line 1773, in toPandas
    return self._session.client.to_pandas(query, self._plan.observations)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 821, in to_pandas
    (self_destruct_conf,) = self.get_config_with_defaults(
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1356, in get_config_with_defaults
    configs = dict(self.config(op).pairs)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1382, in config
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1375, in config
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py

python/pyspark/sql/tests/connect/test_parity_pandas_grouped_map.py.tearDownClass (pyspark.sql.tests.connect.test_parity_pandas_grouped_map.GroupedApplyInPandasTests)

[RETRIES_EXCEEDED] The maximum number of retries has been exceeded.
Raw output
Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1456, in release_session
    resp = self._stub.ReleaseSession(req, metadata=self._builder.metadata())
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1161, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.9/dist-packages/grpc/_channel.py", line 1004, in _end_unary_response_blocking
    raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
	status = StatusCode.UNAVAILABLE
	details = "failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused"
	debug_error_string = "UNKNOWN:failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:44641: Failed to connect to remote host: Connection refused {created_time:"2023-12-20T02:16:09.278271779+00:00", grpc_status:14}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/__w/apache-spark/apache-spark/python/pyspark/testing/connectutils.py", line 194, in tearDownClass
    cls.spark.stop()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/session.py", line 665, in stop
    self.client.release_session()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1461, in release_session
    self._handle_error(error)
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1535, in _handle_error
    raise error
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/core.py", line 1454, in release_session
    for attempt in self._retrying():
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 251, in __iter__
    self._wait()
  File "/__w/apache-spark/apache-spark/python/pyspark/sql/connect/client/retries.py", line 236, in _wait
    raise RetriesExceeded(error_class="RETRIES_EXCEEDED", message_parameters={}) from exception
pyspark.errors.exceptions.base.RetriesExceeded: [RETRIES_EXCEEDED] The maximum number of retries has been exceeded.

Check failure on line 1 in RocksDBStateStoreStreamingAggregationSuite

RocksDBStateStoreStreamingAggregationSuite.SPARK-35896: metrics in StateOperatorProgress are output correctly (RocksDBStateStore)

org.scalatest.exceptions.TestFailedException: 
Timed out waiting for stream: The code passed to failAfter did not complete within 120 seconds.
java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	org.scalatest.concurrent.TimeLimits$.failAfterImpl(TimeLimits.scala:277)
	org.scalatest.concurrent.TimeLimits.failAfter(TimeLimits.scala:231)
	org.scalatest.concurrent.TimeLimits.failAfter$(TimeLimits.scala:230)
	org.apache.spark.SparkFunSuite.failAfter(SparkFunSuite.scala:69)
	org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$7(StreamTest.scala:481)
	org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$7$adapted(StreamTest.scala:480)
	scala.collection.mutable.HashMap$Node.foreach(HashMap.scala:642)
	scala.collection.mutable.HashMap.foreach(HashMap.scala:504)
	org.apache.spark.sql.streaming.StreamTest.fetchStreamAnswer$1(StreamTest.scala:480)

	Caused by: 	null
	java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1764)
		org.apache.spark.sql.execution.streaming.StreamExecution.awaitOffset(StreamExecution.scala:481)
		org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$8(StreamTest.scala:482)
		scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
		org.scalatest.enablers.Timed$$anon$1.timeoutAfter(Timed.scala:127)
		org.scalatest.concurrent.TimeLimits$.failAfterImpl(TimeLimits.scala:282)
		org.scalatest.concurrent.TimeLimits.failAfter(TimeLimits.scala:231)
		org.scalatest.concurrent.TimeLimits.failAfter$(TimeLimits.scala:230)
		org.apache.spark.SparkFunSuite.failAfter(SparkFunSuite.scala:69)
		org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$7(StreamTest.scala:481)


== Progress ==
   StartStream(ProcessingTimeTrigger(0),org.apache.spark.util.SystemClock@5c603221,Map(spark.sql.shuffle.partitions -> 3),null)
   AddData to MemoryStream[value#19321]: 3,2,1,3
=> CheckLastBatch: [3,2],[2,1],[1,1]
   AssertOnQuery(<condition>, Check total state rows = List(3), updated state rows = List(3), rows dropped by watermark = List(0), removed state rows = Some(List(0)))
   AddData to MemoryStream[value#19321]: 1,4
   CheckLastBatch: [1,2],[4,1]
   AssertOnQuery(<condition>, Check operator progress metrics: operatorName = stateStoreSave, numShufflePartitions = 3, numStateStoreInstances = 3)

== Stream ==
Output Mode: Update
Stream state: {}
Thread state: alive
Thread stack trace: java.base@17.0.9/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.9/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
java.base@17.0.9/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
java.base@17.0.9/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
app//scala.concurrent.impl.Promise$DefaultPromise.tryAwait0(Promise.scala:243)
app//scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:255)
app//scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:104)
app//org.apache.spark.util.ThreadUtils$.awaitReady(ThreadUtils.scala:342)
app//org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:983)
app//org.apache.spark.SparkContext.runJob(SparkContext.scala:2428)
app//org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec.writeWithV2(WriteToDataSourceV2Exec.scala:386)
app//org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec.writeWithV2$(WriteToDataSourceV2Exec.scala:360)
app//org.apache.spark.sql.execution.datasources.v2.WriteToDataSourceV2Exec.writeWithV2(WriteToDataSourceV2Exec.scala:308)
app//org.apache.spark.sql.execution.datasources.v2.WriteToDataSourceV2Exec.run(WriteToDataSourceV2Exec.scala:319)
app//org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result$lzycompute(V2CommandExec.scala:43)
app//org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result(V2CommandExec.scala:43)
app//org.apache.spark.sql.execution.datasources.v2.V2CommandExec.executeCollect(V2CommandExec.scala:49)
app//org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:4442)
app//org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:3674)
app//org.apache.spark.sql.Dataset$$Lambda$2326/0x00007fbcf12f0000.apply(Unknown Source)
app//org.apache.spark.sql.Dataset.$anonfun$withAction$2(Dataset.scala:4432)
app//org.apache.spark.sql.Dataset$$Lambda$2338/0x00007fbcf12f54a8.apply(Unknown Source)
app//org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
app//org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:4430)
app//org.apache.spark.sql.Dataset$$Lambda$2327/0x00007fbcf12f03d0.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2282/0x00007fbcf12d3be0.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2268/0x00007fbcf12d0b10.apply(Unknown Source)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:919)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
app//org.apache.spark.sql.Dataset.withAction(Dataset.scala:4430)
app//org.apache.spark.sql.Dataset.collect(Dataset.scala:3674)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runBatch$17(MicroBatchExecution.scala:783)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$2267/0x00007fbcf12d0850.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2282/0x00007fbcf12d3be0.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2268/0x00007fbcf12d0b10.apply(Unknown Source)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:919)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runBatch$16(MicroBatchExecution.scala:771)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$2265/0x00007fbcf12d0000.apply(Unknown Source)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:427)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:425)
app//org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.runBatch(MicroBatchExecution.scala:771)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$2(MicroBatchExecution.scala:326)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$1967/0x00007fbcf11f21b8.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:427)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:425)
app//org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$1(MicroBatchExecution.scala:289)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$1964/0x00007fbcf11f1130.apply$mcZ$sp(Unknown Source)
app//org.apache.spark.sql.execution.streaming.ProcessingTimeExecutor.execute(TriggerExecutor.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.runActivatedStream(MicroBatchExecution.scala:279)
app//org.apache.spark.sql.execution.streaming.StreamExecution.$anonfun$runStream$1(StreamExecution.scala:311)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$Lambda$1955/0x00007fbcf11e8e58.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:919)
app//org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:289)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.$anonfun$run$1(StreamExecution.scala:211)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1$$Lambda$1951/0x00007fbcf11e76d0.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:211)


== Sink ==



== Plan ==
== Parsed Logical Plan ==
WriteToMicroBatchDataSource MemorySink, c7dcd408-619e-4333-a8af-0050d070b913, Update, 0
+- Aggregate [value#19321], [value#19321, count(1) AS count(1)#19326L]
   +- StreamingDataSourceV2Relation [value#19321], org.apache.spark.sql.execution.streaming.MemoryStreamScanBuilder@5030e7ed, MemoryStream[value#19321], -1, 0

== Analyzed Logical Plan ==
WriteToMicroBatchDataSource MemorySink, c7dcd408-619e-4333-a8af-0050d070b913, Update, 0
+- Aggregate [value#19321], [value#19321, count(1) AS count(1)#19326L]
   +- StreamingDataSourceV2Relation [value#19321], org.apache.spark.sql.execution.streaming.MemoryStreamScanBuilder@5030e7ed, MemoryStream[value#19321], -1, 0

== Optimized Logical Plan ==
WriteToDataSourceV2 MicroBatchWrite[epoch: 0, writer: org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@c95101e]
+- Aggregate [value#19321], [value#19321, count(1) AS count(1)#19326L]
   +- StreamingDataSourceV2Relation [value#19321], org.apache.spark.sql.execution.streaming.MemoryStreamScanBuilder@5030e7ed, MemoryStream[value#19321], -1, 0

== Physical Plan ==
WriteToDataSourceV2 MicroBatchWrite[epoch: 0, writer: org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@c95101e], org.apache.spark.sql.execution.datasources.v2.DataSourceV2Strategy$$Lambda$2211/0x00007fbcf129dfa0@7f4e59d6
+- *(4) HashAggregate(keys=[value#19321], functions=[count(1)], output=[value#19321, count(1)#19326L])
   +- StateStoreSave [value#19321], state info [ checkpoint = file:/home/runner/work/apache-spark/apache-spark/target/tmp/streaming.metadata-34c7ea0d-1112-40b1-ba50-e69024d50d1a/state, runId = c22f2402-e633-4634-a564-bc7b51bb20ac, opId = 0, ver = 0, numPartitions = 3], Update, 0, 0, 2
      +- *(3) HashAggregate(keys=[value#19321], functions=[merge_count(1)], output=[value#19321, count#19355L])
         +- StateStoreRestore [value#19321], state info [ checkpoint = file:/home/runner/work/apache-spark/apache-spark/target/tmp/streaming.metadata-34c7ea0d-1112-40b1-ba50-e69024d50d1a/state, runId = c22f2402-e633-4634-a564-bc7b51bb20ac, opId = 0, ver = 0, numPartitions = 3], 2
            +- *(2) HashAggregate(keys=[value#19321], functions=[merge_count(1)], output=[value#19321, count#19355L])
               +- Exchange hashpartitioning(value#19321, 3), ENSURE_REQUIREMENTS, [plan_id=88167]
                  +- *(1) HashAggregate(keys=[value#19321], functions=[partial_count(1)], output=[value#19321, count#19355L])
                     +- *(1) Project [value#19321]
                        +- MicroBatchScan[value#19321] MemoryStreamDataSource
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2282/0x00007fbcf12d3be0.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2268/0x00007fbcf12d0b10.apply(Unknown Source)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:919)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
app//org.apache.spark.sql.Dataset.withAction(Dataset.scala:4430)
app//org.apache.spark.sql.Dataset.collect(Dataset.scala:3674)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runBatch$17(MicroBatchExecution.scala:783)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$2267/0x00007fbcf12d0850.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2282/0x00007fbcf12d3be0.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2268/0x00007fbcf12d0b10.apply(Unknown Source)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:919)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runBatch$16(MicroBatchExecution.scala:771)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$2265/0x00007fbcf12d0000.apply(Unknown Source)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:427)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:425)
app//org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.runBatch(MicroBatchExecution.scala:771)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$2(MicroBatchExecution.scala:326)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$1967/0x00007fbcf11f21b8.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:427)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:425)
app//org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$1(MicroBatchExecution.scala:289)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$1964/0x00007fbcf11f1130.apply$mcZ$sp(Unknown Source)
app//org.apache.spark.sql.execution.streaming.ProcessingTimeExecutor.execute(TriggerExecutor.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.runActivatedStream(MicroBatchExecution.scala:279)
app//org.apache.spark.sql.execution.streaming.StreamExecution.$anonfun$runStream$1(StreamExecution.scala:311)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$Lambda$1955/0x00007fbcf11e8e58.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:919)
app//org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:289)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.$anonfun$run$1(StreamExecution.scala:211)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1$$Lambda$1951/0x00007fbcf11e76d0.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:211)


== Sink ==



== Plan ==
== Parsed Logical Plan ==
WriteToMicroBatchDataSource MemorySink, c7dcd408-619e-4333-a8af-0050d070b913, Update, 0
+- Aggregate [value#19321], [value#19321, count(1) AS count(1)#19326L]
   +- StreamingDataSourceV2Relation [value#19321], org.apache.spark.sql.execution.streaming.MemoryStreamScanBuilder@5030e7ed, MemoryStream[value#19321], -1, 0

== Analyzed Logical Plan ==
WriteToMicroBatchDataSource MemorySink, c7dcd408-619e-4333-a8af-0050d070b913, Update, 0
+- Aggregate [value#19321], [value#19321, count(1) AS count(1)#19326L]
   +- StreamingDataSourceV2Relation [value#19321], org.apache.spark.sql.execution.streaming.MemoryStreamScanBuilder@5030e7ed, MemoryStream[value#19321], -1, 0

== Optimized Logical Plan ==
WriteToDataSourceV2 MicroBatchWrite[epoch: 0, writer: org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@c95101e]
+- Aggregate [value#19321], [value#19321, count(1) AS count(1)#19326L]
   +- StreamingDataSourceV2Relation [value#19321], org.apache.spark.sql.execution.streaming.MemoryStreamScanBuilder@5030e7ed, MemoryStream[value#19321], -1, 0

== Physical Plan ==
WriteToDataSourceV2 MicroBatchWrite[epoch: 0, writer: org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@c95101e], org.apache.spark.sql.execution.datasources.v2.DataSourceV2Strategy$$Lambda$2211/0x00007fbcf129dfa0@7f4e59d6
+- *(4) HashAggregate(keys=[value#19321], functions=[count(1)], output=[value#19321, count(1)#19326L])
   +- StateStoreSave [value#19321], state info [ checkpoint = file:/home/runner/work/apache-spark/apache-spark/target/tmp/streaming.metadata-34c7ea0d-1112-40b1-ba50-e69024d50d1a/state, runId = c22f2402-e633-4634-a564-bc7b51bb20ac, opId = 0, ver = 0, numPartitions = 3], Update, 0, 0, 2
      +- *(3) HashAggregate(keys=[value#19321], functions=[merge_count(1)], output=[value#19321, count#19355L])
         +- StateStoreRestore [value#19321], state info [ checkpoint = file:/home/runner/work/apache-spark/apache-spark/target/tmp/streaming.metadata-34c7ea0d-1112-40b1-ba50-e69024d50d1a/state, runId = c22f2402-e633-4634-a564-bc7b51bb20ac, opId = 0, ver = 0, numPartitions = 3], 2
            +- *(2) HashAggregate(keys=[value#19321], functions=[merge_count(1)], output=[value#19321, count#19355L])
               +- Exchange hashpartitioning(value#19321, 3), ENSURE_REQUIREMENTS, [plan_id=88167]
                  +- *(1) HashAggregate(keys=[value#19321], functions=[partial_count(1)], output=[value#19321, count#19355L])
                     +- *(1) Project [value#19321]
                        +- MicroBatchScan[value#19321] MemoryStreamDataSource

         
         
	at org.scalatest.Assertions.newAssertionFailedException(Assertions.scala:472)
	at org.scalatest.Assertions.newAssertionFailedException$(Assertions.scala:471)
	at org.scalatest.funsuite.AnyFunSuite.newAssertionFailedException(AnyFunSuite.scala:1564)
	at org.scalatest.Assertions.fail(Assertions.scala:933)
	at org.scalatest.Assertions.fail$(Assertions.scala:929)
	at org.scalatest.funsuite.AnyFunSuite.fail(AnyFunSuite.scala:1564)
	at org.apache.spark.sql.streaming.StreamTest.failTest$1(StreamTest.scala:462)
	at org.apache.spark.sql.streaming.StreamTest.liftedTree1$1(StreamTest.scala:800)
	at org.apache.spark.sql.streaming.StreamTest.testStream(StreamTest.scala:776)
	at org.apache.spark.sql.streaming.StreamTest.testStream$(StreamTest.scala:342)
	at org.apache.spark.sql.streaming.StreamingAggregationSuite.testStream(StreamingAggregationSuite.scala:55)
	at org.apache.spark.sql.streaming.StreamingAggregationSuite.$anonfun$new$86(StreamingAggregationSuite.scala:836)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
	at org.apache.spark.sql.catalyst.SQLConfHelper.withSQLConf(SQLConfHelper.scala:56)
	at org.apache.spark.sql.catalyst.SQLConfHelper.withSQLConf$(SQLConfHelper.scala:38)
	at org.apache.spark.sql.streaming.StreamingAggregationSuite.org$apache$spark$sql$test$SQLTestUtilsBase$$super$withSQLConf(StreamingAggregationSuite.scala:55)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf(SQLTestUtils.scala:248)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf$(SQLTestUtils.scala:246)
	at org.apache.spark.sql.streaming.StreamingAggregationSuite.withSQLConf(StreamingAggregationSuite.scala:55)
	at org.apache.spark.sql.streaming.RocksDBStateStoreTest.$anonfun$test$1(RocksDBStateStoreTest.scala:39)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
	at org.scalatest.enablers.Timed$$anon$1.timeoutAfter(Timed.scala:127)
	at org.scalatest.concurrent.TimeLimits$.failAfterImpl(TimeLimits.scala:282)
	at org.scalatest.concurrent.TimeLimits.failAfter(TimeLimits.scala:231)
	at org.scalatest.concurrent.TimeLimits.failAfter$(TimeLimits.scala:230)
	at org.apache.spark.SparkFunSuite.failAfter(SparkFunSuite.scala:69)
	at org.apache.spark.SparkFunSuite.$anonfun$test$2(SparkFunSuite.scala:155)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.funsuite.AnyFunSuiteLike$$anon$1.apply(AnyFunSuiteLike.scala:226)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:227)
	at org.scalatest.funsuite.AnyFunSuiteLike.invokeWithFixture$1(AnyFunSuiteLike.scala:224)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTest$1(AnyFunSuiteLike.scala:236)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest(AnyFunSuiteLike.scala:236)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest$(AnyFunSuiteLike.scala:218)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:69)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:234)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:227)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:69)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTests$1(AnyFunSuiteLike.scala:269)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:413)
	at scala.collection.immutable.List.foreach(List.scala:333)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:396)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:475)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests(AnyFunSuiteLike.scala:269)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests$(AnyFunSuiteLike.scala:268)
	at org.scalatest.funsuite.AnyFunSuite.runTests(AnyFunSuite.scala:1564)
	at org.scalatest.Suite.run(Suite.scala:1114)
	at org.scalatest.Suite.run$(Suite.scala:1096)
	at org.scalatest.funsuite.AnyFunSuite.org$scalatest$funsuite$AnyFunSuiteLike$$super$run(AnyFunSuite.scala:1564)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$run$1(AnyFunSuiteLike.scala:273)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:535)
	at org.scalatest.funsuite.AnyFunSuiteLike.run(AnyFunSuiteLike.scala:273)
	at org.scalatest.funsuite.AnyFunSuiteLike.run$(AnyFunSuiteLike.scala:272)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:69)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:69)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:321)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:517)
	at sbt.ForkMain$Run.lambda$runTest$1(ForkMain.java:414)
	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)
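
The == Progress == section above corresponds to a StreamTest scenario in StreamingAggregationSuite of roughly the following shape. This is a hedged sketch, not the suite's exact code: it assumes the suite's testImplicits are in scope, and the two AssertOnQuery steps shown in the log (state-row counts and operator progress metrics) are elided because their bodies are not visible in the output.

    import org.apache.spark.sql.execution.streaming.MemoryStream
    import org.apache.spark.sql.functions.count
    import org.apache.spark.sql.streaming.OutputMode.Update

    val inputData = MemoryStream[Int]
    val aggregated = inputData.toDF().groupBy("value").agg(count("*"))

    testStream(aggregated, Update)(
      // StartStream(ProcessingTimeTrigger(0), SystemClock, Map(spark.sql.shuffle.partitions -> 3), null)
      StartStream(additionalConfs = Map("spark.sql.shuffle.partitions" -> "3")),
      AddData(inputData, 3, 2, 1, 3),
      // the step marked with "=>" in the Progress section is the one that timed out after 120 seconds
      CheckLastBatch((3, 2), (2, 1), (1, 1)),
      AddData(inputData, 1, 4),
      CheckLastBatch((1, 2), (4, 1))
    )
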