Merge pull request tensorflow#8302 from ayushmankumar7:absl
PiperOrigin-RevId: 302043775
tensorflower-gardener committed Mar 20, 2020
2 parents 2416dd9 + 55bf4b8 commit bd48885
Showing 25 changed files with 120 additions and 110 deletions.
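
The change is mechanical throughout: every call through the deprecated tf.compat.v1.logging shim is rewritten against absl's logging module, and eager %-interpolation is replaced with lazy format arguments. A minimal before/after sketch of the pattern being applied (num_batches is a placeholder variable, not from the diff):

import tensorflow as tf
from absl import logging

num_batches = 10  # placeholder value for illustration

# Before: TF1-style logging through the compat shim, eagerly interpolated.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
tf.compat.v1.logging.info("Decoded %d batches." % num_batches)

# After: absl logging with lazy %-style arguments; the message is only
# formatted if the record is actually emitted.
logging.set_verbosity(logging.INFO)
logging.info("Decoded %d batches.", num_batches)
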
5 changes: 3 additions & 2 deletions official/benchmark/models/resnet_cifar_main.py
@@ -20,6 +20,7 @@
 
 from absl import app
 from absl import flags
+from absl import logging
 import numpy as np
 import tensorflow as tf
 from official.benchmark.models import resnet_cifar_model
@@ -100,7 +101,7 @@ def on_batch_begin(self, batch, logs=None):
     if lr != self.prev_lr:
       self.model.optimizer.learning_rate = lr  # lr should be a float here
       self.prev_lr = lr
-      tf.compat.v1.logging.debug(
+      logging.debug(
           'Epoch %05d Batch %05d: LearningRateBatchScheduler '
           'change learning rate to %s.', self.epochs, batch, lr)

@@ -280,6 +281,6 @@ def main(_):
 
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  logging.set_verbosity(logging.INFO)
   define_cifar_flags()
   app.run(main)
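
For context, the hunks above sit in a Keras callback that retunes the learning rate at batch boundaries. A minimal sketch of that shape, assuming a schedule(epoch, batch) callable; the class scaffolding here is reconstructed for illustration, not copied from the repo:

import tensorflow as tf
from absl import logging

class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
  """Sets the learning rate before each batch, logging every change."""

  def __init__(self, schedule):
    super(LearningRateBatchScheduler, self).__init__()
    self.schedule = schedule  # assumed signature: schedule(epoch, batch) -> float
    self.epochs = 0
    self.prev_lr = -1.0

  def on_epoch_begin(self, epoch, logs=None):
    self.epochs = epoch

  def on_batch_begin(self, batch, logs=None):
    lr = self.schedule(self.epochs, batch)
    if lr != self.prev_lr:
      self.model.optimizer.learning_rate = lr  # lr should be a float here
      self.prev_lr = lr
      logging.debug('Epoch %05d Batch %05d: LearningRateBatchScheduler '
                    'change learning rate to %s.', self.epochs, batch, lr)
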
4 changes: 2 additions & 2 deletions official/benchmark/ncf_keras_benchmark.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 # ==============================================================================
 """Executes Keras benchmarks and accuracy tests."""
-
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -22,6 +21,7 @@
 import time
 
 from absl import flags
+from absl import logging
 from absl.testing import flagsaver
 import tensorflow as tf

@@ -51,7 +51,7 @@ def __init__(self,
   def _setup(self):
     """Sets up and resets flags before each test."""
     assert tf.version.VERSION.startswith('2.')
-    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+    logging.set_verbosity(logging.INFO)
     if NCFKerasBenchmarkBase.local_flags is None:
       ncf_common.define_ncf_flags()
       # Loads flags to get defaults to then override. List cannot be empty.
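
The _setup method above follows a common absl benchmark pattern: define and parse flags once, snapshot the defaults with flagsaver, then restore that snapshot before every later test so runs stay independent. A hedged sketch of the idea (define_fn and the cache argument are illustrative names, not from the repo):

from absl import flags
from absl import logging
from absl.testing import flagsaver

FLAGS = flags.FLAGS

def setup_benchmark_flags(define_fn, cache):
  """Defines flags on first use; restores saved defaults on later calls."""
  logging.set_verbosity(logging.INFO)
  if cache.get('saved_flags') is None:
    define_fn()                 # register the model's flags
    FLAGS(['benchmark'])        # parse once so defaults are materialized
    cache['saved_flags'] = flagsaver.save_flag_values()
  else:
    flagsaver.restore_flag_values(cache['saved_flags'])
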
3 changes: 2 additions & 1 deletion official/modeling/model_training_utils_test.py
@@ -20,6 +20,7 @@
 
 import os
 
+from absl import logging
 from absl.testing import parameterized
 from absl.testing.absltest import mock
 import numpy as np
@@ -125,7 +126,7 @@ def summaries_with_matching_keyword(keyword, summary_dir):
     if event.summary is not None:
       for value in event.summary.value:
         if keyword in value.tag:
-          tf.compat.v1.logging.error(event)
+          logging.error(event)
           yield event.summary


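
The hunk above is from a test helper that scans TensorBoard event files for summaries whose tag contains a keyword. A sketch of such a scan, assuming tf.compat.v1.train.summary_iterator over the newest events file; the glob and path handling are assumptions, not copied from the repo:

import os

import tensorflow as tf
from absl import logging

def summaries_with_matching_keyword(keyword, summary_dir):
  """Yields summary protos whose tag contains the keyword, logging each hit."""
  event_paths = tf.io.gfile.glob(os.path.join(summary_dir, 'events*'))
  for event in tf.compat.v1.train.summary_iterator(event_paths[-1]):
    if event.summary is not None:
      for value in event.summary.value:
        if keyword in value.tag:
          logging.error(event)
          yield event.summary
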
15 changes: 7 additions & 8 deletions official/nlp/transformer/translate.py
@@ -18,6 +18,7 @@
 from __future__ import division
 from __future__ import print_function
 
+from absl import logging
 import numpy as np
 import tensorflow as tf

@@ -117,8 +118,7 @@ def input_generator():
           maxlen=params["decode_max_length"],
           dtype="int32",
           padding="post")
-      tf.compat.v1.logging.info("Decoding batch %d out of %d.", i,
-                                num_decode_batches)
+      logging.info("Decoding batch %d out of %d.", i, num_decode_batches)
       yield batch
 
   @tf.function
@@ -172,16 +172,15 @@ def text_as_per_replica():
         translation = _trim_and_decode(val_outputs[j], subtokenizer)
         translations.append(translation)
         if print_all_translations:
-          tf.compat.v1.logging.info(
-              "Translating:\n\tInput: %s\n\tOutput: %s" %
-              (sorted_inputs[j + i * batch_size], translation))
+          logging.info("Translating:\n\tInput: %s\n\tOutput: %s",
+                       sorted_inputs[j + i * batch_size], translation)
 
   # Write translations in the order they appeared in the original file.
   if output_file is not None:
     if tf.io.gfile.isdir(output_file):
       raise ValueError("File output is a directory, will not save outputs to "
                        "file.")
-    tf.compat.v1.logging.info("Writing to file %s" % output_file)
+    logging.info("Writing to file %s", output_file)
     with tf.compat.v1.gfile.Open(output_file, "w") as f:
       for i in sorted_keys:
         f.write("%s\n" % translations[i])
@@ -191,10 +190,10 @@ def translate_from_text(model, subtokenizer, txt):
   encoded_txt = _encode_and_add_eos(txt, subtokenizer)
   result = model.predict(encoded_txt)
   outputs = result["outputs"]
-  tf.compat.v1.logging.info("Original: \"%s\"" % txt)
+  logging.info("Original: \"%s\"", txt)
   translate_from_input(outputs, subtokenizer)
 
 
 def translate_from_input(outputs, subtokenizer):
   translation = _trim_and_decode(outputs, subtokenizer)
-  tf.compat.v1.logging.info("Translation: \"%s\"" % translation)
+  logging.info("Translation: \"%s\"", translation)
32 changes: 15 additions & 17 deletions official/nlp/transformer/utils/tokenizer.py
@@ -22,6 +22,7 @@
 import re
 import sys
 import unicodedata
+from absl import logging
 
 import numpy as np
 import six
@@ -71,8 +72,7 @@ class Subtokenizer(object):
 
   def __init__(self, vocab_file, reserved_tokens=None, master_char_set=None):
     """Initializes class, creating a vocab file if data_files is provided."""
-    tf.compat.v1.logging.info("Initializing Subtokenizer from file %s." %
-                              vocab_file)
+    logging.info("Initializing Subtokenizer from file %s.", vocab_file)
 
     if master_char_set is None:
       master_char_set = _ALPHANUMERIC_CHAR_SET
@@ -130,17 +130,17 @@ def init_from_files(vocab_file,
     reserved_tokens = RESERVED_TOKENS
 
   if tf.io.gfile.exists(vocab_file):
-    tf.compat.v1.logging.info("Vocab file already exists (%s)" % vocab_file)
+    logging.info("Vocab file already exists (%s)", vocab_file)
   else:
-    tf.compat.v1.logging.info("Begin steps to create subtoken vocabulary...")
+    logging.info("Begin steps to create subtoken vocabulary...")
     token_counts = _count_tokens(files, file_byte_limit, correct_strip,
                                  master_char_set)
     alphabet = _generate_alphabet_dict(token_counts)
     subtoken_list = _generate_subtokens_with_target_vocab_size(
         token_counts, alphabet, target_vocab_size, threshold, min_count,
         reserved_tokens)
-    tf.compat.v1.logging.info("Generated vocabulary with %d subtokens." %
-                              len(subtoken_list))
+    logging.info("Generated vocabulary with %d subtokens.",
+                 len(subtoken_list))
     _save_vocab_file(vocab_file, subtoken_list)
   return Subtokenizer(vocab_file, master_char_set=master_char_set)

@@ -439,23 +439,22 @@ def _generate_subtokens_with_target_vocab_size(token_counts,
     reserved_tokens = RESERVED_TOKENS
 
   if min_count is not None:
-    tf.compat.v1.logging.info(
-        "Using min_count=%d to generate vocab with target size %d" %
-        (min_count, target_size))
+    logging.info("Using min_count=%d to generate vocab with target size %d",
+                 min_count, target_size)
     return _generate_subtokens(
         token_counts, alphabet, min_count, reserved_tokens=reserved_tokens)
 
   def bisect(min_val, max_val):
     """Recursive function to binary search for subtoken vocabulary."""
     cur_count = (min_val + max_val) // 2
-    tf.compat.v1.logging.info("Binary search: trying min_count=%d (%d %d)" %
-                              (cur_count, min_val, max_val))
+    logging.info("Binary search: trying min_count=%d (%d %d)", cur_count,
+                 min_val, max_val)
     subtoken_list = _generate_subtokens(
         token_counts, alphabet, cur_count, reserved_tokens=reserved_tokens)
 
     val = len(subtoken_list)
-    tf.compat.v1.logging.info(
-        "Binary search: min_count=%d resulted in %d tokens" % (cur_count, val))
+    logging.info("Binary search: min_count=%d resulted in %d tokens",
+                 cur_count, val)
 
     within_threshold = abs(val - target_size) < threshold
     if within_threshold or min_val >= max_val or cur_count < 2:
@@ -471,8 +470,7 @@ def bisect(min_val, max_val):
       return other_subtoken_list
     return subtoken_list
 
-  tf.compat.v1.logging.info("Finding best min_count to get target size of %d" %
-                            target_size)
+  logging.info("Finding best min_count to get target size of %d", target_size)
   return bisect(_MIN_MIN_COUNT, _MAX_MIN_COUNT)


@@ -644,7 +642,7 @@ def _generate_subtokens(token_counts,
   # subtoken_dict, count how often the resulting subtokens appear, and update
   # the dictionary with subtokens w/ high enough counts.
   for i in xrange(num_iterations):
-    tf.compat.v1.logging.info("\tGenerating subtokens: iteration %d" % i)
+    logging.info("\tGenerating subtokens: iteration %d", i)
     # Generate new subtoken->id dictionary using the new subtoken list.
     subtoken_dict = _list_to_index_dict(subtoken_list)

@@ -658,5 +656,5 @@ def bisect(min_val, max_val):
     subtoken_list, max_subtoken_length = _gen_new_subtoken_list(
         subtoken_counts, min_count, alphabet, reserved_tokens)
 
-  tf.compat.v1.logging.info("\tVocab size: %d" % len(subtoken_list))
+  logging.info("\tVocab size: %d", len(subtoken_list))
   return subtoken_list
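
The bisect helper above binary-searches min_count, relying on the vocabulary shrinking (roughly monotonically) as min_count grows. A standalone sketch of that search, with generate_vocab standing in for _generate_subtokens; the real code also compares a neighboring candidate before returning, which this sketch omits:

from absl import logging

def find_min_count(generate_vocab, target_size, threshold, min_val, max_val):
  """Returns a vocab whose size is within `threshold` of `target_size`."""
  cur_count = (min_val + max_val) // 2
  logging.info("Binary search: trying min_count=%d (%d %d)", cur_count,
               min_val, max_val)
  vocab = generate_vocab(cur_count)
  val = len(vocab)
  logging.info("Binary search: min_count=%d resulted in %d tokens",
               cur_count, val)
  if abs(val - target_size) < threshold or min_val >= max_val or cur_count < 2:
    return vocab
  if val > target_size:
    # Too many subtokens: a higher min_count prunes rarer candidates.
    return find_min_count(generate_vocab, target_size, threshold,
                          cur_count + 1, max_val)
  return find_min_count(generate_vocab, target_size, threshold,
                        min_val, cur_count - 1)
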
5 changes: 3 additions & 2 deletions official/r1/mnist/mnist.py
@@ -19,8 +19,9 @@
 
 from absl import app as absl_app
 from absl import flags
+from absl import logging
 from six.moves import range
-import tensorflow as tf  # pylint: disable=g-bad-import-order
+import tensorflow as tf
 
 from official.r1.mnist import dataset
 from official.utils.flags import core as flags_core
@@ -241,6 +242,6 @@ def main(_):
 
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  logging.set_verbosity(logging.INFO)
   define_mnist_flags()
   absl_app.run(main)
4 changes: 2 additions & 2 deletions official/r1/mnist/mnist_test.py
@@ -21,7 +21,7 @@
 import unittest
 
 import tensorflow as tf  # pylint: disable=g-bad-import-order
-
+from absl import logging
 from official.r1.mnist import mnist
 from official.utils.misc import keras_utils

@@ -143,5 +143,5 @@ def benchmark_train_step_time(self):
 
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+  logging.set_verbosity(logging.ERROR)
   tf.test.main()
13 changes: 7 additions & 6 deletions official/r1/resnet/cifar10_main.py
@@ -22,8 +22,9 @@
 
 from absl import app as absl_app
 from absl import flags
+from absl import logging
 from six.moves import range
-import tensorflow as tf  # pylint: disable=g-bad-import-order
+import tensorflow as tf
 
 from official.r1.resnet import resnet_model
 from official.r1.resnet import resnet_run_loop
@@ -139,9 +140,9 @@ def input_fn(is_training,
   dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES)
 
   if input_context:
-    tf.compat.v1.logging.info(
-        'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d' % (
-            input_context.input_pipeline_id, input_context.num_input_pipelines))
+    logging.info(
+        'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',
+        input_context.input_pipeline_id, input_context.num_input_pipelines)
     dataset = dataset.shard(input_context.num_input_pipelines,
                             input_context.input_pipeline_id)

@@ -270,7 +271,7 @@ def run_cifar(flags_obj):
     Dictionary of results. Including final accuracy.
   """
   if flags_obj.image_bytes_as_serving_input:
-    tf.compat.v1.logging.fatal(
+    logging.fatal(
        '--image_bytes_as_serving_input cannot be set to True for CIFAR. '
        'This flag is only applicable to ImageNet.')
     return
@@ -291,6 +292,6 @@ def main(_):
 
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  logging.set_verbosity(logging.INFO)
   define_cifar_flags()
   absl_app.run(main)
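
Both ResNet input_fns now log dataset sharding the same way: when tf.distribute hands the function an InputContext, each input pipeline keeps only every num_input_pipelines-th element of the source. A condensed sketch of that logic (_RECORD_BYTES is CIFAR-10's one label byte plus a 32x32x3 image):

import tensorflow as tf
from absl import logging

_RECORD_BYTES = 32 * 32 * 3 + 1

def input_fn(filenames, input_context=None):
  """Builds the record dataset, sharded per input pipeline if distributed."""
  dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES)
  if input_context:
    logging.info(
        'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',
        input_context.input_pipeline_id, input_context.num_input_pipelines)
    dataset = dataset.shard(input_context.num_input_pipelines,
                            input_context.input_pipeline_id)
  return dataset
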
5 changes: 3 additions & 2 deletions official/r1/resnet/cifar10_test.py
@@ -19,14 +19,15 @@
 
 from tempfile import mkstemp
 
+from absl import logging
 import numpy as np
-import tensorflow as tf  # pylint: disable=g-bad-import-order
+import tensorflow as tf
 
 from official.r1.resnet import cifar10_main
 from official.utils.misc import keras_utils
 from official.utils.testing import integration
 
-tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+logging.set_verbosity(logging.ERROR)

_BATCH_SIZE = 128
_HEIGHT = 32
3 changes: 2 additions & 1 deletion official/r1/resnet/estimator_benchmark.py
@@ -21,6 +21,7 @@
 import time
 
 from absl import flags
+from absl import logging
 from absl.testing import flagsaver
 import tensorflow as tf  # pylint: disable=g-bad-import-order

@@ -56,7 +57,7 @@ def _get_model_dir(self, folder_name):
 
   def _setup(self):
     """Sets up and resets flags before each test."""
-    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+    logging.set_verbosity(logging.INFO)
     if EstimatorBenchmark.local_flags is None:
       for flag_method in self.flag_methods:
         flag_method()
9 changes: 5 additions & 4 deletions official/r1/resnet/imagenet_main.py
@@ -22,6 +22,7 @@
 
 from absl import app as absl_app
 from absl import flags
+from absl import logging
 from six.moves import range
 import tensorflow as tf

@@ -194,9 +195,9 @@ def input_fn(is_training,
   dataset = tf.data.Dataset.from_tensor_slices(filenames)
 
   if input_context:
-    tf.compat.v1.logging.info(
-        'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d' % (
-            input_context.input_pipeline_id, input_context.num_input_pipelines))
+    logging.info(
+        'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',
+        input_context.input_pipeline_id, input_context.num_input_pipelines)
     dataset = dataset.shard(input_context.num_input_pipelines,
                             input_context.input_pipeline_id)

@@ -387,6 +388,6 @@ def main(_):
 
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  logging.set_verbosity(logging.INFO)
   define_imagenet_flags()
   absl_app.run(main)
3 changes: 2 additions & 1 deletion official/r1/resnet/imagenet_test.py
@@ -20,12 +20,13 @@
 import unittest
 
 import tensorflow as tf  # pylint: disable=g-bad-import-order
+from absl import logging
 
 from official.r1.resnet import imagenet_main
 from official.utils.misc import keras_utils
 from official.utils.testing import integration
 
-tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+logging.set_verbosity(logging.ERROR)

_BATCH_SIZE = 32
_LABEL_CLASSES = 1001