
Commit 53d1142

Update test tolerance
PiperOrigin-RevId: 561468335
ursk authored and tensorflower-gardener committed Aug 30, 2023
1 parent 8cce50a commit 53d1142
Showing 1 changed file with 22 additions and 12 deletions.

tensorflow_probability/python/internal/backend/numpy/numpy_test.py
@@ -1120,7 +1120,9 @@ def _not_implemented(*args, **kwargs):
         xla_const_args=(1,)),
     TestCase(
         'math.reduce_prod', [
-            array_axis_tuples(allow_multi_axis=True),
+            array_axis_tuples(
+                # TODO(b/298224187) TF produces 0, np NaN for large elements.
+                elements=floats(-1e6, 1e6), allow_multi_axis=True),
             array_axis_tuples(dtype=np.int32, allow_multi_axis=True)
         ],
         xla_const_args=(1,)),
@@ -1175,7 +1177,7 @@ def _not_implemented(*args, **kwargs):
                 allow_nan=False,
                 allow_infinity=False))
         ],
-        xla_rtol=1e-4),
+        atol=1e-4),
     TestCase('math.softmax', [
         single_arrays(
             shape=shapes(min_dims=1),
@@ -1217,8 +1219,12 @@ def _not_implemented(*args, **kwargs):
     # keywords=None, defaults=(0, False, False, None))
     TestCase(
         'math.cumprod', [
-            hps.tuples(array_axis_tuples(), hps.booleans(),
-                       hps.booleans()).map(lambda x: x[0] + (x[1], x[2]))
+            hps.tuples(
+                array_axis_tuples(
+                    # TODO(b/298224187) TF produces 0, np NaN for large inputs.
+                    elements=floats(min_value=-1e12, max_value=1e12)),
+                hps.booleans(),
+                hps.booleans()).map(lambda x: x[0] + (x[1], x[2]))
         ],
         xla_const_args=(1, 2, 3)),
     TestCase(
@@ -1260,9 +1266,11 @@ def _not_implemented(*args, **kwargs):
         ]),
     TestCase('math.abs', [single_arrays()]),
     TestCase('math.acos', [single_arrays(elements=floats(-1., 1.))]),
-    TestCase('math.acosh', [single_arrays(elements=positive_floats())]),
+    TestCase('math.acosh', [single_arrays(elements=positive_floats())],
+             atol=1e-4),
     TestCase('math.asin', [single_arrays(elements=floats(-1., 1.))]),
-    TestCase('math.asinh', [single_arrays(elements=positive_floats())]),
+    TestCase('math.asinh', [single_arrays(elements=positive_floats())],
+             atol=1e-4),
     TestCase('math.atan', [single_arrays()]),
     TestCase('math.atanh', [single_arrays(elements=floats(-1., 1.))]),
     TestCase(
@@ -1296,7 +1304,8 @@ def _not_implemented(*args, **kwargs):
     TestCase('math.is_inf', [single_arrays()]),
     TestCase('math.is_nan', [single_arrays()]),
     TestCase('math.lgamma', [single_arrays(elements=positive_floats())]),
-    TestCase('math.log', [single_arrays(elements=positive_floats())]),
+    TestCase('math.log', [single_arrays(elements=positive_floats())],
+             atol=1e-4),
     TestCase('math.log1p',
              [single_arrays(elements=floats(min_value=-1 + 1e-6))],
             xla_atol=1e-4, xla_rtol=1e-4),
@@ -1316,11 +1325,11 @@ TestCase('math.sign', [single_arrays()]),
     TestCase('math.sign', [single_arrays()]),
     TestCase('math.sin', [single_arrays()]),
     TestCase('math.sinh', [single_arrays(elements=floats(-100., 100.))]),
-    TestCase('math.softplus', [single_arrays()]),
+    TestCase('math.softplus', [single_arrays()], atol=1e-4),
     TestCase('math.sqrt', [single_arrays(elements=positive_floats())]),
     TestCase('math.square', [single_arrays()]),
     TestCase('math.tan', [single_arrays()]),
-    TestCase('math.tanh', [single_arrays()]),
+    TestCase('math.tanh', [single_arrays()], atol=1e-4),
 
     # ArgSpec(args=['x', 'q', 'name'], varargs=None, keywords=None,
     #         defaults=(None,))
@@ -1367,9 +1376,11 @@ def _not_implemented(*args, **kwargs):
     TestCase('math.xdivy',
              [n_same_shape(n=2, elements=[floats(), non_zero_floats()])]),
     TestCase('math.xlogy',
-             [n_same_shape(n=2, elements=[floats(), positive_floats()])]),
+             [n_same_shape(n=2, elements=[floats(), positive_floats()])],
+             atol=1e-4),
     TestCase('math.xlog1py',
-             [n_same_shape(n=2, elements=[floats(), positive_floats()])]),
+             [n_same_shape(n=2, elements=[floats(), positive_floats()])],
+             atol=1e-4),
     TestCase('nn.conv2d', [conv2d_params()], disabled=NUMPY_MODE),
     TestCase(
         'nn.sparse_softmax_cross_entropy_with_logits', [sparse_xent_params()],
@@ -1993,7 +2004,6 @@ def assert_same_dtype(x, y):
       tensorflow_value = post_processor(tensorflow_value)
 
     if assert_shape_only:
-
       def assert_same_shape(x, y):
         self.assertAllEqual(x.shape, y.shape)
 
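Note (context added here, not part of the commit): the TODO(b/298224187)
comments bound the generated elements because float32 products of large
values overflow. A minimal NumPy sketch of the failure mode, with
illustrative values:

    import numpy as np

    # float32 tops out near 3.4e38, so a product of large elements reaches
    # inf, and inf * 0.0 is nan. Once that happens, two backends (or two
    # reduction orders) can report different results for the same input.
    x = np.array([1e30, 1e30, 0.0], dtype=np.float32)
    print(np.prod(x[:2]))  # inf: 1e30 * 1e30 overflows float32
    print(np.prod(x))      # nan: the overflowed inf meets the trailing 0.0

Bounding the elements to (-1e6, 1e6) for reduce_prod and (-1e12, 1e12) for
cumprod keeps typical generated products away from this regime.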
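Similarly hedged context for the tolerance edits (xla_rtol=1e-4 replaced by
atol=1e-4, and atol=1e-4 added to several cases): an assert_allclose-style
check accepts |actual - desired| <= atol + rtol * |desired|, so when the
expected value is near zero a purely relative bound shrinks toward zero and
only an absolute tolerance can absorb small backend differences. A sketch
using numpy.testing (values illustrative, not from numpy_test.py):

    import numpy as np

    # With desired == 0 the rtol term contributes nothing, so atol decides.
    np.testing.assert_allclose(np.float32(5e-5), 0.0, atol=1e-4)  # passes
    # The same comparison under a purely relative bound would raise:
    # np.testing.assert_allclose(np.float32(5e-5), 0.0, rtol=1e-4)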
