From e908bcf9e31a65c703046985f58c4d161c6faddd Mon Sep 17 00:00:00 2001
From: Afroz Mohiuddin
Date: Fri, 10 Jan 2020 11:55:19 -0800
Subject: [PATCH] In files where we explicitly import tf.compat.v1, don't
 qualify access of tf.compat.v1 again.

PiperOrigin-RevId: 289135863
---
 tensor2tensor/data_generators/generator_utils.py | 8 ++++----
 tensor2tensor/data_generators/problem_test.py    | 2 +-
 tensor2tensor/layers/common_video_test.py        | 2 +-
 tensor2tensor/layers/discretization_test.py      | 2 +-
 tensor2tensor/layers/latent_layers_test.py       | 2 +-
 tensor2tensor/layers/modalities_test.py          | 2 +-
 tensor2tensor/layers/ngram_test.py               | 2 +-
 tensor2tensor/utils/adafactor.py                 | 2 +-
 tensor2tensor/utils/multistep_optimizer.py       | 2 +-
 tensor2tensor/utils/optimize.py                  | 2 +-
 tensor2tensor/utils/t2t_model_test.py            | 2 +-
 tensor2tensor/utils/test_utils.py                | 4 ++--
 tensor2tensor/utils/test_utils_test.py           | 3 +--
 tensor2tensor/utils/yellowfin.py                 | 6 +++---
 14 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/tensor2tensor/data_generators/generator_utils.py b/tensor2tensor/data_generators/generator_utils.py
index 1fe678e14..0b532caf9 100644
--- a/tensor2tensor/data_generators/generator_utils.py
+++ b/tensor2tensor/data_generators/generator_utils.py
@@ -867,18 +867,18 @@ def dict_pack(example):

   def _standardize(self, dataset, keys):
     """Force dataset structure into a tuple of Tensors."""
-    shapes = tf.compat.v1.data.get_output_shapes(dataset)
+    shapes = tf.data.get_output_shapes(dataset)

     if isinstance(shapes, dict):
       keys = keys or tuple(shapes.keys())
       dataset = dataset.map(lambda x: tuple(x[k] for k in keys))
-      shapes = tf.compat.v1.data.get_output_shapes(dataset)
+      shapes = tf.data.get_output_shapes(dataset)

     if not all(isinstance(i, tf.TensorShape) for i in shapes):
       # Internally this class expects tuples of Tensors, even for the degenerate
       # case of a single sequence.
       dataset = dataset.map(lambda x: (x,))
-      shapes = tf.compat.v1.data.get_output_shapes(dataset)
+      shapes = tf.data.get_output_shapes(dataset)

     for s in shapes:
       if not s.is_compatible_with(tf.TensorShape([None])):
@@ -890,7 +890,7 @@ def _standardize(self, dataset, keys):
     if self._chop_long_sequences and len(shapes) != 1:
       raise ValueError("chop_long_sequences expects a single sequence dataset.")

-    token_types = tf.compat.v1.data.get_output_types(dataset)
+    token_types = tf.data.get_output_types(dataset)
     if len(set(token_types)) > 1:
       raise ValueError("Inconsistent dtypes: {}".format(token_types))

diff --git a/tensor2tensor/data_generators/problem_test.py b/tensor2tensor/data_generators/problem_test.py
index 6fdd94521..f0f7b99f4 100644
--- a/tensor2tensor/data_generators/problem_test.py
+++ b/tensor2tensor/data_generators/problem_test.py
@@ -30,7 +30,7 @@
 from tensor2tensor.utils import test_utils

 import tensorflow.compat.v1 as tf
-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 def assert_tensors_equal(sess, t1, t2, n):
diff --git a/tensor2tensor/layers/common_video_test.py b/tensor2tensor/layers/common_video_test.py
index b7188cd34..90ce75b7d 100644
--- a/tensor2tensor/layers/common_video_test.py
+++ b/tensor2tensor/layers/common_video_test.py
@@ -26,7 +26,7 @@
 from tensor2tensor.utils import test_utils

 import tensorflow.compat.v1 as tf
-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class CommonVideoTest(parameterized.TestCase, tf.test.TestCase):
diff --git a/tensor2tensor/layers/discretization_test.py b/tensor2tensor/layers/discretization_test.py
index 5d4eec265..d0454e2e1 100644
--- a/tensor2tensor/layers/discretization_test.py
+++ b/tensor2tensor/layers/discretization_test.py
@@ -24,7 +24,7 @@
 from tensor2tensor.utils import test_utils

 import tensorflow.compat.v1 as tf
-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class DiscretizationTest(tf.test.TestCase):
diff --git a/tensor2tensor/layers/latent_layers_test.py b/tensor2tensor/layers/latent_layers_test.py
index 097465117..59e6ca506 100644
--- a/tensor2tensor/layers/latent_layers_test.py
+++ b/tensor2tensor/layers/latent_layers_test.py
@@ -28,7 +28,7 @@
 from tensor2tensor.utils import test_utils

 import tensorflow.compat.v1 as tf
-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 def imagetransformer_latent_tiny():
diff --git a/tensor2tensor/layers/modalities_test.py b/tensor2tensor/layers/modalities_test.py
index 393c558aa..adbb86414 100644
--- a/tensor2tensor/layers/modalities_test.py
+++ b/tensor2tensor/layers/modalities_test.py
@@ -26,7 +26,7 @@
 from tensor2tensor.utils import test_utils

 import tensorflow.compat.v1 as tf
-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class ModalityTest(tf.test.TestCase):
diff --git a/tensor2tensor/layers/ngram_test.py b/tensor2tensor/layers/ngram_test.py
index 0233722e5..958788f23 100644
--- a/tensor2tensor/layers/ngram_test.py
+++ b/tensor2tensor/layers/ngram_test.py
@@ -24,7 +24,7 @@
 from tensor2tensor.utils import test_utils

 import tensorflow.compat.v1 as tf
-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class NGramTest(tf.test.TestCase):
diff --git a/tensor2tensor/utils/adafactor.py b/tensor2tensor/utils/adafactor.py
index 58525a740..87617ecd9 100644
--- a/tensor2tensor/utils/adafactor.py
+++ b/tensor2tensor/utils/adafactor.py
@@ -24,7 +24,7 @@
 import tensorflow.compat.v1 as tf


-class AdafactorOptimizer(tf.compat.v1.train.Optimizer):
+class AdafactorOptimizer(tf.train.Optimizer):
   """Optimizer that implements the Adafactor algorithm.

   Adafactor is described in https://arxiv.org/abs/1804.04235.
diff --git a/tensor2tensor/utils/multistep_optimizer.py b/tensor2tensor/utils/multistep_optimizer.py
index f745a5d40..2367c8437 100644
--- a/tensor2tensor/utils/multistep_optimizer.py
+++ b/tensor2tensor/utils/multistep_optimizer.py
@@ -29,7 +29,7 @@
 import tensorflow.compat.v1 as tf


-class MultistepAdamOptimizer(tf.compat.v1.train.AdamOptimizer):
+class MultistepAdamOptimizer(tf.train.AdamOptimizer):
   """Adam with SGD updates every n steps with accumulated gradients."""

   def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
diff --git a/tensor2tensor/utils/optimize.py b/tensor2tensor/utils/optimize.py
index b0d848ac9..a88f31ad5 100644
--- a/tensor2tensor/utils/optimize.py
+++ b/tensor2tensor/utils/optimize.py
@@ -184,7 +184,7 @@ def _register_base_optimizer(name, opt):
   _register_base_optimizer(_name, _opt)


-class ConditionalOptimizer(tf.compat.v1.train.Optimizer):
+class ConditionalOptimizer(tf.train.Optimizer):
   """Conditional optimizer."""

   def __init__(self, optimizer_name, lr, hparams, use_tpu=False):  # pylint: disable=super-init-not-called
diff --git a/tensor2tensor/utils/t2t_model_test.py b/tensor2tensor/utils/t2t_model_test.py
index a204b7cc6..7a8423996 100644
--- a/tensor2tensor/utils/t2t_model_test.py
+++ b/tensor2tensor/utils/t2t_model_test.py
@@ -25,7 +25,7 @@
 from tensor2tensor.utils import test_utils

 import tensorflow.compat.v1 as tf
-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class T2TModelTest(tf.test.TestCase):
diff --git a/tensor2tensor/utils/test_utils.py b/tensor2tensor/utils/test_utils.py
index 81fcd5a6f..56912b439 100644
--- a/tensor2tensor/utils/test_utils.py
+++ b/tensor2tensor/utils/test_utils.py
@@ -39,7 +39,7 @@ def run_in_graph_and_eager_modes(func=None,
   For example, consider the following unittest:

   ```python
-  tf.compat.v1.enable_eager_execution()
+  tf.enable_eager_execution()

   class SomeTest(tf.test.TestCase):

@@ -120,5 +120,5 @@ def decorated(self, *args, **kwargs):


 def test_main():
-  tf.compat.v1.enable_eager_execution()
+  tf.enable_eager_execution()
   tf.test.main()
diff --git a/tensor2tensor/utils/test_utils_test.py b/tensor2tensor/utils/test_utils_test.py
index 0b29391de..f5f949701 100644
--- a/tensor2tensor/utils/test_utils_test.py
+++ b/tensor2tensor/utils/test_utils_test.py
@@ -22,8 +22,7 @@
 from tensor2tensor.utils import test_utils

 import tensorflow.compat.v1 as tf
-
-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class RunInGraphAndEagerTest(tf.test.TestCase):
diff --git a/tensor2tensor/utils/yellowfin.py b/tensor2tensor/utils/yellowfin.py
index eb50ad01b..d95820eae 100644
--- a/tensor2tensor/utils/yellowfin.py
+++ b/tensor2tensor/utils/yellowfin.py
@@ -22,9 +22,9 @@


 # Values for gate_gradients.
-GATE_NONE = tf.compat.v1.train.Optimizer.GATE_NONE
-GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP
-GATE_GRAPH = tf.compat.v1.train.Optimizer.GATE_GRAPH
+GATE_NONE = tf.train.Optimizer.GATE_NONE
+GATE_OP = tf.train.Optimizer.GATE_OP
+GATE_GRAPH = tf.train.Optimizer.GATE_GRAPH


 class YellowFinOptimizer(object):