Skip to content

Commit

Permalink
Don't distinguish between types of ArithmeticException for Spark 3.2.x (#5483)
Browse files Browse the repository at this point in the history

* do not distinguish java or spark exception in version 32X

Signed-off-by: remzi <13716567376yh@gmail.com>

* refine the error message

Signed-off-by: remzi <13716567376yh@gmail.com>
  • Loading branch information
HaoYang670 authored May 20, 2022
1 parent 29940af commit c9f3595
Showing 1 changed file with 23 additions and 5 deletions.
28 changes: 23 additions & 5 deletions integration_tests/src/main/python/arithmetic_ops_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
from marks import ignore_order, incompat, approximate_float, allow_non_gpu
from pyspark.sql.types import *
from pyspark.sql.types import IntegralType
from spark_session import with_cpu_session, with_gpu_session, with_spark_session, is_before_spark_320, is_before_spark_330, is_databricks91_or_later
from spark_session import with_cpu_session, with_gpu_session, with_spark_session, is_before_spark_320, is_before_spark_330, is_databricks91_or_later, is_spark_330_or_later
import pyspark.sql.functions as f
from datetime import timedelta

Expand All @@ -42,11 +42,13 @@
# High-precision decimal generators used across the arithmetic tests.
_decimal_gen_38_10 = DecimalGen(precision=38, scale=10)
_decimal_gen_38_neg10 = DecimalGen(precision=38, scale=-10)

# Decimal generators with a non-negative scale whose precision strictly
# exceeds the scale (i.e. excludes the precision == scale case).
_arith_data_gens_diff_precision_scale_and_no_neg_scale = [
    decimal_gen_32bit, decimal_gen_64bit, _decimal_gen_18_0, decimal_gen_128bit,
    _decimal_gen_30_2, _decimal_gen_36_5, _decimal_gen_38_0, _decimal_gen_38_10
]

# All non-negative-scale decimal generators, adding back the
# precision == scale generator (decimal(7,7)).
_arith_decimal_gens_no_neg_scale = _arith_data_gens_diff_precision_scale_and_no_neg_scale + [_decimal_gen_7_7]

# The full decimal generator set, including negative scales.
_arith_decimal_gens = _arith_decimal_gens_no_neg_scale + [
    decimal_gen_32bit_neg_scale, _decimal_gen_36_neg5, _decimal_gen_38_neg10
]
Expand Down Expand Up @@ -275,19 +277,35 @@ def test_mod_pmod_long_min_value():
'a % -1L'),
ansi_enabled_conf)

@pytest.mark.parametrize('data_gen', _arith_data_gens_diff_precision_scale_and_no_neg_scale, ids=idfn)
@pytest.mark.parametrize('overflow_exp', [
    'pmod(a, cast(0 as {}))',
    'pmod(cast(-12 as {}), cast(0 as {}))',
    'a % (cast(0 as {}))',
    'cast(-12 as {}) % cast(0 as {})'], ids=idfn)
def test_mod_pmod_by_zero(data_gen, overflow_exp):
    """% and pmod by a zero divisor must raise the same error on CPU and GPU in ANSI mode.

    Starting with Spark 3.2.0 the divide-by-zero error is raised as
    org.apache.spark.SparkArithmeticException; earlier versions raise the
    plain java.lang.ArithmeticException. Either way the message contains
    "divide by zero".
    """
    string_type = to_cast_string(data_gen.data_type)
    # Match the full prefixed message so we also verify which exception class
    # the running Spark version throws, not just that something failed.
    exception_str = "java.lang.ArithmeticException: divide by zero" if is_before_spark_320() else \
        "org.apache.spark.SparkArithmeticException: divide by zero"

    assert_gpu_and_cpu_error(
        lambda spark : unary_op_df(spark, data_gen).selectExpr(
            overflow_exp.format(string_type, string_type)).collect(),
        ansi_enabled_conf,
        exception_str)

def test_cast_neg_to_decimal_err():
    """Casting -12 to decimal(7,7) must overflow identically on CPU and GPU.

    decimal(7,7) has zero integral digits, so -12 cannot be represented and
    ANSI mode must raise. Spark 3.3.0 changed the exception class from
    java.lang.ArithmeticException to org.apache.spark.SparkArithmeticException.
    """
    target_gen = _decimal_gen_7_7
    exception_content = "Decimal(compact,-120000000,20,0}) cannot be represented as Decimal(7, 7)"
    if is_before_spark_330():
        exception_str = "java.lang.ArithmeticException: " + exception_content
    else:
        exception_str = "org.apache.spark.SparkArithmeticException: " + exception_content

    cast_expr = 'cast(-12 as {})'.format(to_cast_string(target_gen.data_type))
    assert_gpu_and_cpu_error(
        lambda spark : unary_op_df(spark, target_gen).selectExpr(cast_expr).collect(),
        ansi_enabled_conf,
        exception_str)

@pytest.mark.parametrize('data_gen', _arith_data_gens_no_neg_scale, ids=idfn)
def test_mod_pmod_by_zero_not_ansi(data_gen):
Expand Down

0 comments on commit c9f3595

Please sign in to comment.