Convert some tests to cover both eager and graph.
PiperOrigin-RevId: 165760364
Committed by: TensorFlower Gardener
Parent: 5ead76420d
Commit: b6409594d3
@@ -46,6 +46,7 @@ from tensorflow.core.protobuf import rewriter_config_pb2
 from tensorflow.python import pywrap_tensorflow
 from tensorflow.python.client import device_lib
 from tensorflow.python.client import session
+from tensorflow.python.eager import context
 from tensorflow.python.framework import device as pydev
 from tensorflow.python.framework import errors
 from tensorflow.python.framework import ops
@@ -273,6 +274,58 @@ def enable_c_api(fn):
   return lambda *args, **kwargs: _use_c_api_wrapper(fn, True, *args, **kwargs)
 
 
+def run_in_graph_and_eager_modes(__unused__=None, graph=None, config=None,
+                                 use_gpu=False, force_gpu=False):
+  """Runs the test in both graph and eager modes.
+
+  Args:
+    __unused__: Prevents silently skipping tests.
+    graph: Optional graph to use during the returned session.
+    config: An optional config_pb2.ConfigProto to use to configure the
+      session.
+    use_gpu: If True, attempt to run as many ops as possible on GPU.
+    force_gpu: If True, pin all ops to `/device:GPU:0`.
+
+  Returns:
+    A decorator that will run the decorated test function using both a graph
+    and eager execution.
+  """
+
+  assert not __unused__, "Add () after run_in_graph_and_eager_modes."
+
+  def decorator(f):
+    """Test method decorator."""
+    def decorated(self):
+      """Runs the decorated test in graph mode, then in eager mode."""
+      with context.graph_mode():
+        with self.test_session(graph, config, use_gpu, force_gpu):
+          f(self)
+
+      def run_eager_mode():
+        if force_gpu:
+          gpu_name = gpu_device_name()
+          if not gpu_name:
+            gpu_name = "/device:GPU:0"
+          with context.device(gpu_name):
+            f(self)
+        elif use_gpu:
+          # TODO(xpan): Support soft placement and GPU by default when available.
+          f(self)
+        else:
+          with context.device("/device:CPU:0"):
+            f(self)
+
+      with context.eager_mode():
+        if graph is None:
+          run_eager_mode()
+        else:
+          with graph.as_default():
+            run_eager_mode()
+
+    return decorated
+  return decorator
+
+
 class TensorFlowTestCase(googletest.TestCase):
   """Base class for tests that need to test TensorFlow.
   """
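To make the new decorator concrete, here is a minimal, hypothetical usage sketch (not part of this commit; the test class, file, and op are invented for illustration). The decorated body runs twice: once inside a graph-mode test_session() supplied by the decorator, and once under eager execution.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class AddTest(test.TestCase):  # hypothetical test case, for illustration only

  @test_util.run_in_graph_and_eager_modes()
  def testAdd(self):
    # Runs once with a graph and the decorator-provided default session,
    # then once more with eager execution enabled.
    total = math_ops.add(constant_op.constant(2.0), constant_op.constant(3.0))
    # self.evaluate() (added in the next hunk) returns numpy values in both modes.
    self.assertAllClose(5.0, self.evaluate(total))


if __name__ == "__main__":
  test.main()

Note the parentheses on the decorator: the __unused__ assertion above exists precisely to fail fast when @run_in_graph_and_eager_modes is written without ().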
@@ -386,6 +439,25 @@ class TensorFlowTestCase(googletest.TestCase):
       fail_msg += " : %r" % (msg) if msg else ""
       self.fail(fail_msg)
 
+  def evaluate(self, tensors):
+    """Evaluates tensors and returns numpy values.
+
+    Args:
+      tensors: A Tensor or a list of Tensors.
+
+    Returns:
+      The tensors' numpy values.
+    """
+    if context.in_eager_mode():
+      if isinstance(tensors, list):
+        assert all(isinstance(t, ops.EagerTensor) for t in tensors)
+        return [t.numpy() for t in tensors]
+      assert isinstance(tensors, ops.EagerTensor), "Must be list or EagerTensor"
+      return tensors.numpy()
+    else:
+      sess = ops.get_default_session()
+      return sess.run(tensors)
+
   # pylint: disable=g-doc-return-or-yield
   @contextlib.contextmanager
   def test_session(self,
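As a short, hypothetical extension of the sketch above (same imports, method added to the invented AddTest class), this is what evaluate() buys a converted test: one call that resolves graph Tensors through the default session and EagerTensors through .numpy(), for a single tensor or a list of tensors.

  @test_util.run_in_graph_and_eager_modes()
  def testAddPair(self):
    a = constant_op.constant([1.0, 2.0])
    b = constant_op.constant([3.0, 4.0])
    # evaluate() dispatches on the execution mode: sess.run() in graph mode,
    # Tensor.numpy() in eager mode; a list in yields a list of ndarrays out.
    a_np, total_np = self.evaluate([a, math_ops.add(a, b)])
    self.assertAllClose([1.0, 2.0], a_np)
    self.assertAllClose([4.0, 6.0], total_np)

The hunks below apply exactly this pattern to existing tests (softmax, log-Poisson loss, log-softmax, L2 loss, L2 normalize) in what appears to be the NN test file.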
@@ -26,6 +26,7 @@ from six.moves import xrange  # pylint: disable=redefined-builtin
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import gradient_checker
 from tensorflow.python.ops import nn_impl
@@ -74,16 +75,16 @@ class SoftmaxTest(test_lib.TestCase):
     z = u.sum(1)[:, np.newaxis]
     return u / z
 
+  @test_util.run_in_graph_and_eager_modes()
   def testSoftmax(self):
     x_shape = [5, 10]
     x_np = np.random.randn(*x_shape).astype(np.float32)
     y_np = self._softmax(x_np)
-    with self.test_session():
-      x_tf = constant_op.constant(x_np)
-      y_tf = nn_ops.softmax(x_tf)
-      y_tf_last_dim = nn_ops.softmax(x_tf, 1)
-      y_tf_np = y_tf.eval()
-      y_tf_last_dim_np = y_tf_last_dim.eval()
+    x_tf = constant_op.constant(x_np)
+    y_tf = nn_ops.softmax(x_tf)
+    y_tf_last_dim = nn_ops.softmax(x_tf, 1)
+    y_tf_np = self.evaluate(y_tf)
+    y_tf_last_dim_np = self.evaluate(y_tf_last_dim)
     eps = 1e-3
     self.assertAllClose(y_tf_np, y_np, eps)
     self.assertAllClose(y_tf_last_dim_np, y_np, eps)
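The conversion recipe visible above repeats in every remaining hunk: add @test_util.run_in_graph_and_eager_modes(), drop the test's own `with self.test_session():` block (the decorator already supplies a session in graph mode), and replace tensor.eval() with self.evaluate(tensor). A condensed, hypothetical before/after sketch of the recipe (the relu test is invented, not from the diff; the method bodies are assumed to live in a test.TestCase subclass like those above, with nn_ops referring to tensorflow.python.ops.nn_ops):

  # Before: graph-only.
  def testRelu(self):
    x = constant_op.constant([-1.0, 2.0])
    with self.test_session():
      y = nn_ops.relu(x)
      self.assertAllClose([0.0, 2.0], y.eval())

  # After: runs under both graph construction and eager execution.
  @test_util.run_in_graph_and_eager_modes()
  def testRelu(self):
    x = constant_op.constant([-1.0, 2.0])
    y = nn_ops.relu(x)
    self.assertAllClose([0.0, 2.0], self.evaluate(y))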
@@ -109,18 +110,18 @@ class LogPoissonLossTest(test_lib.TestCase):
     lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.)
     return lpl
 
+  @test_util.run_in_graph_and_eager_modes()
   def testLogPoissonLoss(self):
     x_shape = [5, 10]
     x_np = np.random.randn(*x_shape).astype(np.float32)
     z_np = np.random.randint(0, 5, size=x_shape).astype(np.float32)
     y_np = self._log_poisson_loss(x_np, z_np, compute_full_loss=False)
     y_np_stirling = self._log_poisson_loss(x_np, z_np, compute_full_loss=True)
-    with self.test_session():
-      y_tf = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=False)
-      y_tf_stirling = nn_impl.log_poisson_loss(
-          z_np, x_np, compute_full_loss=True)
-      y_tf_np = y_tf.eval()
-      y_tf_np_stirling = y_tf_stirling.eval()
+    y_tf = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=False)
+    y_tf_stirling = nn_impl.log_poisson_loss(
+        z_np, x_np, compute_full_loss=True)
+    y_tf_np = self.evaluate(y_tf)
+    y_tf_np_stirling = self.evaluate(y_tf_stirling)
     eps = 1e-3
     self.assertAllClose(y_tf_np, y_np, eps)
     self.assertAllClose(y_tf_np_stirling, y_np_stirling, eps)
@@ -151,14 +152,14 @@ class LogSoftmaxTest(test_lib.TestCase):
     u = x - m
     return u - np.log(np.sum(np.exp(u), 1, keepdims=True))
 
+  @test_util.run_in_graph_and_eager_modes()
   def testLogSoftmax(self):
     x_shape = [5, 10]
     x_np = np.random.randn(*x_shape).astype(np.float32)
     y_np = self._log_softmax(x_np)
-    with self.test_session():
-      x_tf = constant_op.constant(x_np)
-      y_tf = nn_ops.log_softmax(x_tf)
-      y_tf_np = y_tf.eval()
+    x_tf = constant_op.constant(x_np)
+    y_tf = nn_ops.log_softmax(x_tf)
+    y_tf_np = self.evaluate(y_tf)
     eps = 1e-3
     self.assertAllClose(y_tf_np, y_np, eps)
 
@@ -176,13 +177,13 @@
 
 class L2LossTest(test_lib.TestCase):
 
+  @test_util.run_in_graph_and_eager_modes()
   def testL2Loss(self):
     for dtype in [dtypes.float32, dtypes.float64]:
-      with self.test_session():
-        x = constant_op.constant(
-            [1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype)
-        l2loss = nn_ops.l2_loss(x)
-        value = l2loss.eval()
+      x = constant_op.constant(
+          [1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype)
+      l2loss = nn_ops.l2_loss(x)
+      value = self.evaluate(l2loss)
       self.assertAllClose(7.0, value)
 
   def testGradient(self):
@@ -210,27 +211,27 @@ class L2NormalizeTest(test_lib.TestCase):
     norm = np.apply_along_axis(np.linalg.norm, dim, x)
     return x / np.expand_dims(norm, dim)
 
+  @test_util.run_in_graph_and_eager_modes()
   def testL2Normalize(self):
     x_shape = [20, 7, 3]
     np.random.seed(1)
     x_np = np.random.random_sample(x_shape).astype(np.float32)
     for dim in range(len(x_shape)):
       y_np = self._l2Normalize(x_np, dim)
-      with self.test_session():
-        x_tf = constant_op.constant(x_np, name="x")
-        y_tf = nn_impl.l2_normalize(x_tf, dim)
-        self.assertAllClose(y_np, y_tf.eval())
+      x_tf = constant_op.constant(x_np, name="x")
+      y_tf = nn_impl.l2_normalize(x_tf, dim)
+      self.assertAllClose(y_np, self.evaluate(y_tf))
 
+  @test_util.run_in_graph_and_eager_modes()
   def testL2NormalizeDimArray(self):
     x_shape = [20, 7, 3]
     np.random.seed(1)
     x_np = np.random.random_sample(x_shape).astype(np.float32)
     dim = [1, 2]
     y_np = self._l2Normalize(x_np, dim)
-    with self.test_session():
-      x_tf = constant_op.constant(x_np, name="x")
-      y_tf = nn_impl.l2_normalize(x_tf, dim)
-      self.assertAllClose(y_np, y_tf.eval())
+    x_tf = constant_op.constant(x_np, name="x")
+    y_tf = nn_impl.l2_normalize(x_tf, dim)
+    self.assertAllClose(y_np, self.evaluate(y_tf))
 
   def testL2NormalizeGradient(self):
     x_shape = [20, 7, 3]