diff --git a/dev/run-tests-jenkins.py b/dev/run-tests-jenkins.py
index 72e32d4e16e14..13be9592d771f 100755
--- a/dev/run-tests-jenkins.py
+++ b/dev/run-tests-jenkins.py
@@ -198,7 +198,7 @@ def main():
     # format: http://linux.die.net/man/1/timeout
     # must be less than the timeout configured on Jenkins. Usually Jenkins's timeout is higher
     # then this. Please consult with the build manager or a committer when it should be increased.
-    tests_timeout = "400m"
+    tests_timeout = "500m"
 
     # Array to capture all test names to run on the pull request. These tests are represented
     # by their file equivalents in the dev/tests/ directory.
diff --git a/python/pyspark/sql/tests/test_arrow.py b/python/pyspark/sql/tests/test_arrow.py
index c3c9fb0f12a25..c59765dd79eb9 100644
--- a/python/pyspark/sql/tests/test_arrow.py
+++ b/python/pyspark/sql/tests/test_arrow.py
@@ -435,11 +435,12 @@ def test_createDateFrame_with_category_type(self):
         assert_frame_equal(result_spark, result_arrow)
 
         # ensure original category elements are string
-        assert isinstance(category_first_element, str)
+        self.assertIsInstance(category_first_element, str)
         # spark data frame and arrow execution mode enabled data frame type must match pandas
-        assert spark_type == arrow_type == 'string'
-        assert isinstance(arrow_first_category_element, str)
-        assert isinstance(spark_first_category_element, str)
+        self.assertEqual(spark_type, 'string')
+        self.assertEqual(arrow_type, 'string')
+        self.assertIsInstance(arrow_first_category_element, str)
+        self.assertIsInstance(spark_first_category_element, str)
 
     @unittest.skipIf(
diff --git a/python/pyspark/sql/tests/test_pandas_udf_scalar.py b/python/pyspark/sql/tests/test_pandas_udf_scalar.py
index ae6b8d520f735..2d38efd39f902 100644
--- a/python/pyspark/sql/tests/test_pandas_udf_scalar.py
+++ b/python/pyspark/sql/tests/test_pandas_udf_scalar.py
@@ -910,13 +910,10 @@ def to_category_func(x):
         spark_type = df.dtypes[1][1]
 
         # spark data frame and arrow execution mode enabled data frame type must match pandas
-        assert spark_type == 'string'
+        self.assertEqual(spark_type, 'string')
 
-        # Check result value of column 'B' must be equal to column 'A'
-        for i in range(0, len(result_spark["A"])):
-            assert result_spark["A"][i] == result_spark["B"][i]
-            assert isinstance(result_spark["A"][i], str)
-            assert isinstance(result_spark["B"][i], str)
+        # Check result of column 'B' must be equal to column 'A' in type and values
+        pd.testing.assert_series_equal(result_spark["A"], result_spark["B"], check_names=False)
 
     @unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
     def test_type_annotation(self):
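
A minimal standalone sketch, not part of the diff above, of why pd.testing.assert_series_equal can stand in for the removed element-wise loop: a single call compares values, dtype, and index, and check_names=False ignores the differing Series names ('A' vs 'B'). The DataFrame below is an assumed toy stand-in for result_spark in the test.

import pandas as pd

# Toy frame standing in for result_spark in the test above (assumed data).
result = pd.DataFrame({"A": ["a", "b", "c", "a"], "B": ["a", "b", "c", "a"]})

# Passes: same values and same object (string) dtype; the names 'A' vs 'B' are ignored.
pd.testing.assert_series_equal(result["A"], result["B"], check_names=False)

# Would fail with a descriptive message if the dtypes differed,
# e.g. if one column were categorical:
# pd.testing.assert_series_equal(
#     result["A"], result["B"].astype("category"), check_names=False)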