diff --git a/official/benchmark/models/resnet_cifar_main.py b/official/benchmark/models/resnet_cifar_main.py
index e2d69b55372..87cf96d6416 100644
--- a/official/benchmark/models/resnet_cifar_main.py
+++ b/official/benchmark/models/resnet_cifar_main.py
@@ -20,6 +20,7 @@
 from absl import app
 from absl import flags
+from absl import logging
 import numpy as np
 import tensorflow as tf
 from official.benchmark.models import resnet_cifar_model
@@ -100,7 +101,7 @@ def on_batch_begin(self, batch, logs=None):
     if lr != self.prev_lr:
       self.model.optimizer.learning_rate = lr  # lr should be a float here
       self.prev_lr = lr
-      tf.compat.v1.logging.debug(
+      logging.debug(
           'Epoch %05d Batch %05d: LearningRateBatchScheduler '
           'change learning rate to %s.', self.epochs, batch, lr)
@@ -280,6 +281,6 @@ def main(_):
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  logging.set_verbosity(logging.INFO)
   define_cifar_flags()
   app.run(main)
diff --git a/official/benchmark/ncf_keras_benchmark.py b/official/benchmark/ncf_keras_benchmark.py
index 1d66b52199a..5622eeec8e0 100644
--- a/official/benchmark/ncf_keras_benchmark.py
+++ b/official/benchmark/ncf_keras_benchmark.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 # ==============================================================================
 """Executes Keras benchmarks and accuracy tests."""
-
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -22,6 +21,7 @@
 import time
 
 from absl import flags
+from absl import logging
 from absl.testing import flagsaver
 import tensorflow as tf
@@ -51,7 +51,7 @@ def __init__(self,
 
   def _setup(self):
     """Sets up and resets flags before each test."""
     assert tf.version.VERSION.startswith('2.')
-    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+    logging.set_verbosity(logging.INFO)
     if NCFKerasBenchmarkBase.local_flags is None:
      ncf_common.define_ncf_flags()
      # Loads flags to get defaults to then override. List cannot be empty.
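Note (aside, not part of the patch): every hunk in this change applies the same call-site pattern, absl.logging with lazy %-style arguments and absl's own verbosity constants in place of tf.compat.v1.logging. A minimal self-contained sketch of that pattern, assuming only that absl-py is installed:

from absl import logging

logging.set_verbosity(logging.INFO)

# Format arguments are passed separately; they are interpolated only if the
# record is actually emitted at the current verbosity.
logging.info('Epoch %05d Batch %05d: change learning rate to %s.', 1, 20, 0.001)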
diff --git a/official/modeling/model_training_utils_test.py b/official/modeling/model_training_utils_test.py
index 647d6b9815c..e4e787a6439 100644
--- a/official/modeling/model_training_utils_test.py
+++ b/official/modeling/model_training_utils_test.py
@@ -20,6 +20,7 @@
 import os
 
+from absl import logging
 from absl.testing import parameterized
 from absl.testing.absltest import mock
 import numpy as np
@@ -125,7 +126,7 @@ def summaries_with_matching_keyword(keyword, summary_dir):
     if event.summary is not None:
       for value in event.summary.value:
         if keyword in value.tag:
-          tf.compat.v1.logging.error(event)
+          logging.error(event)
           yield event.summary
diff --git a/official/nlp/transformer/translate.py b/official/nlp/transformer/translate.py
index 6a89e9bcf33..1f92504142e 100644
--- a/official/nlp/transformer/translate.py
+++ b/official/nlp/transformer/translate.py
@@ -18,6 +18,7 @@
 from __future__ import division
 from __future__ import print_function
 
+from absl import logging
 import numpy as np
 import tensorflow as tf
 
@@ -117,8 +118,7 @@ def input_generator():
           maxlen=params["decode_max_length"],
           dtype="int32",
           padding="post")
-      tf.compat.v1.logging.info("Decoding batch %d out of %d.", i,
-                                num_decode_batches)
+      logging.info("Decoding batch %d out of %d.", i, num_decode_batches)
       yield batch
 
   @tf.function
@@ -172,16 +172,15 @@ def text_as_per_replica():
         translation = _trim_and_decode(val_outputs[j], subtokenizer)
         translations.append(translation)
         if print_all_translations:
-          tf.compat.v1.logging.info(
-              "Translating:\n\tInput: %s\n\tOutput: %s" %
-              (sorted_inputs[j + i * batch_size], translation))
+          logging.info("Translating:\n\tInput: %s\n\tOutput: %s",
+                       sorted_inputs[j + i * batch_size], translation)
 
   # Write translations in the order they appeared in the original file.
   if output_file is not None:
     if tf.io.gfile.isdir(output_file):
       raise ValueError("File output is a directory, will not save outputs to "
                        "file.")
-    tf.compat.v1.logging.info("Writing to file %s" % output_file)
+    logging.info("Writing to file %s", output_file)
     with tf.compat.v1.gfile.Open(output_file, "w") as f:
       for i in sorted_keys:
         f.write("%s\n" % translations[i])
@@ -191,10 +190,10 @@ def translate_from_text(model, subtokenizer, txt):
   encoded_txt = _encode_and_add_eos(txt, subtokenizer)
   result = model.predict(encoded_txt)
   outputs = result["outputs"]
-  tf.compat.v1.logging.info("Original: \"%s\"" % txt)
+  logging.info("Original: \"%s\"", txt)
   translate_from_input(outputs, subtokenizer)
 
 
 def translate_from_input(outputs, subtokenizer):
   translation = _trim_and_decode(outputs, subtokenizer)
-  tf.compat.v1.logging.info("Translation: \"%s\"" % translation)
+  logging.info("Translation: \"%s\"", translation)
diff --git a/official/nlp/transformer/utils/tokenizer.py b/official/nlp/transformer/utils/tokenizer.py
index 8beab204daf..3749dfe9de6 100644
--- a/official/nlp/transformer/utils/tokenizer.py
+++ b/official/nlp/transformer/utils/tokenizer.py
@@ -22,6 +22,7 @@
 import re
 import sys
 import unicodedata
+from absl import logging
 import numpy as np
 import six
@@ -71,8 +72,7 @@ class Subtokenizer(object):
 
   def __init__(self, vocab_file, reserved_tokens=None, master_char_set=None):
     """Initializes class, creating a vocab file if data_files is provided."""
-    tf.compat.v1.logging.info("Initializing Subtokenizer from file %s." %
-                              vocab_file)
+    logging.info("Initializing Subtokenizer from file %s.", vocab_file)
     if master_char_set is None:
       master_char_set = _ALPHANUMERIC_CHAR_SET
@@ -130,17 +130,17 @@ def init_from_files(vocab_file,
       reserved_tokens = RESERVED_TOKENS
 
     if tf.io.gfile.exists(vocab_file):
-      tf.compat.v1.logging.info("Vocab file already exists (%s)" % vocab_file)
+      logging.info("Vocab file already exists (%s)", vocab_file)
     else:
-      tf.compat.v1.logging.info("Begin steps to create subtoken vocabulary...")
+      logging.info("Begin steps to create subtoken vocabulary...")
       token_counts = _count_tokens(files, file_byte_limit, correct_strip,
                                    master_char_set)
       alphabet = _generate_alphabet_dict(token_counts)
       subtoken_list = _generate_subtokens_with_target_vocab_size(
           token_counts, alphabet, target_vocab_size, threshold, min_count,
          reserved_tokens)
-      tf.compat.v1.logging.info("Generated vocabulary with %d subtokens." %
-                                len(subtoken_list))
+      logging.info("Generated vocabulary with %d subtokens.",
+                   len(subtoken_list))
       _save_vocab_file(vocab_file, subtoken_list)
     return Subtokenizer(vocab_file, master_char_set=master_char_set)
@@ -439,23 +439,22 @@ def _generate_subtokens_with_target_vocab_size(token_counts,
     reserved_tokens = RESERVED_TOKENS
 
   if min_count is not None:
-    tf.compat.v1.logging.info(
-        "Using min_count=%d to generate vocab with target size %d" %
-        (min_count, target_size))
+    logging.info("Using min_count=%d to generate vocab with target size %d",
+                 min_count, target_size)
     return _generate_subtokens(
         token_counts, alphabet, min_count, reserved_tokens=reserved_tokens)
 
   def bisect(min_val, max_val):
     """Recursive function to binary search for subtoken vocabulary."""
     cur_count = (min_val + max_val) // 2
-    tf.compat.v1.logging.info("Binary search: trying min_count=%d (%d %d)" %
-                              (cur_count, min_val, max_val))
+    logging.info("Binary search: trying min_count=%d (%d %d)", cur_count,
+                 min_val, max_val)
     subtoken_list = _generate_subtokens(
         token_counts, alphabet, cur_count, reserved_tokens=reserved_tokens)
     val = len(subtoken_list)
-    tf.compat.v1.logging.info(
-        "Binary search: min_count=%d resulted in %d tokens" % (cur_count, val))
+    logging.info("Binary search: min_count=%d resulted in %d tokens", cur_count,
+                 val)
 
     within_threshold = abs(val - target_size) < threshold
     if within_threshold or min_val >= max_val or cur_count < 2:
@@ -471,8 +470,7 @@ def bisect(min_val, max_val):
       return other_subtoken_list
     return subtoken_list
 
-  tf.compat.v1.logging.info("Finding best min_count to get target size of %d" %
-                            target_size)
+  logging.info("Finding best min_count to get target size of %d", target_size)
   return bisect(_MIN_MIN_COUNT, _MAX_MIN_COUNT)
@@ -644,7 +642,7 @@ def _generate_subtokens(token_counts,
   # subtoken_dict, count how often the resulting subtokens appear, and update
   # the dictionary with subtokens w/ high enough counts.
   for i in xrange(num_iterations):
-    tf.compat.v1.logging.info("\tGenerating subtokens: iteration %d" % i)
+    logging.info("\tGenerating subtokens: iteration %d", i)
     # Generate new subtoken->id dictionary using the new subtoken list.
     subtoken_dict = _list_to_index_dict(subtoken_list)
@@ -658,5 +656,5 @@ def _generate_subtokens(token_counts,
     subtoken_list, max_subtoken_length = _gen_new_subtoken_list(
         subtoken_counts, min_count, alphabet, reserved_tokens)
 
-    tf.compat.v1.logging.info("\tVocab size: %d" % len(subtoken_list))
+    logging.info("\tVocab size: %d", len(subtoken_list))
   return subtoken_list
diff --git a/official/r1/mnist/mnist.py b/official/r1/mnist/mnist.py
index 7403e3f8366..9963a430cbd 100644
--- a/official/r1/mnist/mnist.py
+++ b/official/r1/mnist/mnist.py
@@ -19,8 +19,9 @@
 from absl import app as absl_app
 from absl import flags
+from absl import logging
 from six.moves import range
-import tensorflow as tf  # pylint: disable=g-bad-import-order
+import tensorflow as tf
 
 from official.r1.mnist import dataset
 from official.utils.flags import core as flags_core
@@ -241,6 +242,6 @@ def main(_):
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  logging.set_verbosity(logging.INFO)
   define_mnist_flags()
   absl_app.run(main)
diff --git a/official/r1/mnist/mnist_test.py b/official/r1/mnist/mnist_test.py
index 9c5b6dc393a..207c0d0bbff 100644
--- a/official/r1/mnist/mnist_test.py
+++ b/official/r1/mnist/mnist_test.py
@@ -21,7 +21,7 @@
 import unittest
 
 import tensorflow as tf  # pylint: disable=g-bad-import-order
-
+from absl import logging
 from official.r1.mnist import mnist
 from official.utils.misc import keras_utils
@@ -143,5 +143,5 @@ def benchmark_train_step_time(self):
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+  logging.set_verbosity(logging.ERROR)
   tf.test.main()
diff --git a/official/r1/resnet/cifar10_main.py b/official/r1/resnet/cifar10_main.py
index fa6a2f0d6f1..63b23642210 100644
--- a/official/r1/resnet/cifar10_main.py
+++ b/official/r1/resnet/cifar10_main.py
@@ -22,8 +22,9 @@
 from absl import app as absl_app
 from absl import flags
+from absl import logging
 from six.moves import range
-import tensorflow as tf  # pylint: disable=g-bad-import-order
+import tensorflow as tf
 
 from official.r1.resnet import resnet_model
 from official.r1.resnet import resnet_run_loop
@@ -139,9 +140,9 @@ def input_fn(is_training,
   dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES)
 
   if input_context:
-    tf.compat.v1.logging.info(
-        'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d' % (
-            input_context.input_pipeline_id, input_context.num_input_pipelines))
+    logging.info(
+        'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',
+        input_context.input_pipeline_id, input_context.num_input_pipelines)
     dataset = dataset.shard(input_context.num_input_pipelines,
                             input_context.input_pipeline_id)
@@ -270,7 +271,7 @@ def run_cifar(flags_obj):
     Dictionary of results. Including final accuracy.
   """
   if flags_obj.image_bytes_as_serving_input:
-    tf.compat.v1.logging.fatal(
+    logging.fatal(
         '--image_bytes_as_serving_input cannot be set to True for CIFAR. '
         'This flag is only applicable to ImageNet.')
     return
@@ -291,6 +292,6 @@ def main(_):
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  logging.set_verbosity(logging.INFO)
   define_cifar_flags()
   absl_app.run(main)
diff --git a/official/r1/resnet/cifar10_test.py b/official/r1/resnet/cifar10_test.py
index 7bd88715727..827cb0c4f1e 100644
--- a/official/r1/resnet/cifar10_test.py
+++ b/official/r1/resnet/cifar10_test.py
@@ -19,14 +19,15 @@
 
 from tempfile import mkstemp
 
+from absl import logging
 import numpy as np
-import tensorflow as tf  # pylint: disable=g-bad-import-order
+import tensorflow as tf
 
 from official.r1.resnet import cifar10_main
 from official.utils.misc import keras_utils
 from official.utils.testing import integration
 
-tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+logging.set_verbosity(logging.ERROR)
 
 _BATCH_SIZE = 128
 _HEIGHT = 32
diff --git a/official/r1/resnet/estimator_benchmark.py b/official/r1/resnet/estimator_benchmark.py
index 18056fa5830..eef3ea467e3 100644
--- a/official/r1/resnet/estimator_benchmark.py
+++ b/official/r1/resnet/estimator_benchmark.py
@@ -21,6 +21,7 @@
 import time
 
 from absl import flags
+from absl import logging
 from absl.testing import flagsaver
 import tensorflow as tf  # pylint: disable=g-bad-import-order
@@ -56,7 +57,7 @@ def _get_model_dir(self, folder_name):
 
   def _setup(self):
     """Sets up and resets flags before each test."""
-    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+    logging.set_verbosity(logging.INFO)
     if EstimatorBenchmark.local_flags is None:
       for flag_method in self.flag_methods:
         flag_method()
diff --git a/official/r1/resnet/imagenet_main.py b/official/r1/resnet/imagenet_main.py
index 0bbb9fe9fb9..dcfa9ce655f 100644
--- a/official/r1/resnet/imagenet_main.py
+++ b/official/r1/resnet/imagenet_main.py
@@ -22,6 +22,7 @@
 from absl import app as absl_app
 from absl import flags
+from absl import logging
 from six.moves import range
 import tensorflow as tf
@@ -194,9 +195,9 @@ def input_fn(is_training,
   dataset = tf.data.Dataset.from_tensor_slices(filenames)
 
   if input_context:
-    tf.compat.v1.logging.info(
-        'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d' % (
-            input_context.input_pipeline_id, input_context.num_input_pipelines))
+    logging.info(
+        'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',
+        input_context.input_pipeline_id, input_context.num_input_pipelines)
     dataset = dataset.shard(input_context.num_input_pipelines,
                             input_context.input_pipeline_id)
@@ -387,6 +388,6 @@ def main(_):
 
 if __name__ == '__main__':
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  logging.set_verbosity(logging.INFO)
   define_imagenet_flags()
   absl_app.run(main)
diff --git a/official/r1/resnet/imagenet_test.py b/official/r1/resnet/imagenet_test.py
index 5a7ddcd686f..7d33ef098dd 100644
--- a/official/r1/resnet/imagenet_test.py
+++ b/official/r1/resnet/imagenet_test.py
@@ -20,12 +20,13 @@
 import unittest
 
 import tensorflow as tf  # pylint: disable=g-bad-import-order
+from absl import logging
 
 from official.r1.resnet import imagenet_main
 from official.utils.misc import keras_utils
 from official.utils.testing import integration
 
-tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+logging.set_verbosity(logging.ERROR)
 
 _BATCH_SIZE = 32
 _LABEL_CLASSES = 1001
diff --git a/official/r1/resnet/resnet_run_loop.py b/official/r1/resnet/resnet_run_loop.py
index d1c6282f871..d5087fb32a3 100644
--- a/official/r1/resnet/resnet_run_loop.py
+++ b/official/r1/resnet/resnet_run_loop.py
@@ -29,6 +29,7 @@
 import os
 
 from absl import flags
+from absl import logging
 import tensorflow as tf
 
 from official.r1.resnet import imagenet_preprocessing
@@ -83,8 +84,8 @@ def process_record_dataset(dataset,
     options.experimental_threading.private_threadpool_size = (
         datasets_num_private_threads)
     dataset = dataset.with_options(options)
-    tf.compat.v1.logging.info('datasets_num_private_threads: %s',
-                              datasets_num_private_threads)
+    logging.info('datasets_num_private_threads: %s',
+                 datasets_num_private_threads)
 
   # Disable intra-op parallelism to optimize for throughput instead of latency.
   options = tf.data.Options()
@@ -205,17 +206,15 @@ def override_flags_and_set_envars_for_gpu_thread_pool(flags_obj):
     what has been set by the user on the command-line.
   """
   cpu_count = multiprocessing.cpu_count()
-  tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)
+  logging.info('Logical CPU cores: %s', cpu_count)
 
   # Sets up thread pool for each GPU for op scheduling.
   per_gpu_thread_count = 1
   total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
   os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
   os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
-  tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
-                            os.environ['TF_GPU_THREAD_COUNT'])
-  tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
-                            os.environ['TF_GPU_THREAD_MODE'])
+  logging.info('TF_GPU_THREAD_COUNT: %s', os.environ['TF_GPU_THREAD_COUNT'])
+  logging.info('TF_GPU_THREAD_MODE: %s', os.environ['TF_GPU_THREAD_MODE'])
 
   # Reduces general thread pool by number of threads used for GPU pool.
   main_thread_count = cpu_count - total_gpu_thread_count
@@ -648,7 +647,7 @@ def input_fn_eval():
         hooks=train_hooks, max_steps=flags_obj.max_train_steps)
     eval_spec = tf.estimator.EvalSpec(input_fn=input_fn_eval)
-    tf.compat.v1.logging.info('Starting to train and evaluate.')
+    logging.info('Starting to train and evaluate.')
     tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
     # tf.estimator.train_and_evalute doesn't return anything in multi-worker
     # case.
@@ -671,8 +670,7 @@ def input_fn_eval():
     schedule[-1] = train_epochs - sum(schedule[:-1])  # over counting.
 
   for cycle_index, num_train_epochs in enumerate(schedule):
-    tf.compat.v1.logging.info('Starting cycle: %d/%d', cycle_index,
-                              int(n_loops))
+    logging.info('Starting cycle: %d/%d', cycle_index, int(n_loops))
 
     if num_train_epochs:
       # Since we are calling classifier.train immediately in each loop, the
@@ -691,7 +689,7 @@ def input_fn_eval():
    # allows the eval (which is generally unimportant in those circumstances)
    # to terminate. Note that eval will run for max_train_steps each loop,
    # regardless of the global_step count.
-    tf.compat.v1.logging.info('Starting to evaluate.')
+    logging.info('Starting to evaluate.')
 
     eval_results = classifier.evaluate(input_fn=input_fn_eval,
                                        steps=flags_obj.max_train_steps)
diff --git a/official/r1/utils/data/file_io.py b/official/r1/utils/data/file_io.py
index 0f7f3b94749..b7776fc9e40 100644
--- a/official/r1/utils/data/file_io.py
+++ b/official/r1/utils/data/file_io.py
@@ -25,10 +25,11 @@
 import tempfile
 import uuid
 
+from absl import logging
 import numpy as np
 import six
-
 import tensorflow as tf
+# pylint:disable=logging-format-interpolation
 
 
 class _GarbageCollector(object):
@@ -50,9 +51,9 @@ def purge(self):
       for i in self.temp_buffers:
         if tf.io.gfile.exists(i):
           tf.io.gfile.remove(i)
-          tf.compat.v1.logging.info("Buffer file {} removed".format(i))
+          logging.info("Buffer file {} removed".format(i))
     except Exception as e:
-      tf.compat.v1.logging.error("Failed to cleanup buffer files: {}".format(e))
+      logging.error("Failed to cleanup buffer files: {}".format(e))
 
 
 _GARBAGE_COLLECTOR = _GarbageCollector()
@@ -176,7 +177,7 @@ def write_to_buffer(dataframe, buffer_path, columns, expected_size=None):
     actual_size = tf.io.gfile.stat(buffer_path).length
     if expected_size == actual_size:
       return buffer_path
-    tf.compat.v1.logging.warning(
+    logging.warning(
         "Existing buffer {} has size {}. Expected size {}. Deleting and "
         "rebuilding buffer.".format(buffer_path, actual_size, expected_size))
     tf.io.gfile.remove(buffer_path)
@@ -187,8 +188,7 @@ def write_to_buffer(dataframe, buffer_path, columns, expected_size=None):
 
   tf.io.gfile.makedirs(os.path.split(buffer_path)[0])
 
-  tf.compat.v1.logging.info("Constructing TFRecordDataset buffer: {}"
-                            .format(buffer_path))
+  logging.info("Constructing TFRecordDataset buffer: {}".format(buffer_path))
   count = 0
   pool = multiprocessing.dummy.Pool(multiprocessing.cpu_count())
@@ -198,10 +198,10 @@ def write_to_buffer(dataframe, buffer_path, columns, expected_size=None):
                                      rows_per_core=_ROWS_PER_CORE):
         _serialize_shards(df_shards, columns, pool, writer)
         count += sum([len(s) for s in df_shards])
-        tf.compat.v1.logging.info("{}/{} examples written."
-                                  .format(str(count).ljust(8), len(dataframe)))
+        logging.info("{}/{} examples written.".format(
+            str(count).ljust(8), len(dataframe)))
   finally:
     pool.terminate()
 
-  tf.compat.v1.logging.info("Buffer write complete.")
+  logging.info("Buffer write complete.")
   return buffer_path
diff --git a/official/r1/wide_deep/census_test.py b/official/r1/wide_deep/census_test.py
index f0c601340cd..8e1c8657902 100644
--- a/official/r1/wide_deep/census_test.py
+++ b/official/r1/wide_deep/census_test.py
@@ -21,13 +21,14 @@
 import unittest
 
 import tensorflow as tf  # pylint: disable=g-bad-import-order
+from absl import logging
 
 from official.utils.misc import keras_utils
 from official.utils.testing import integration
 from official.r1.wide_deep import census_dataset
 from official.r1.wide_deep import census_main
 
-tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+logging.set_verbosity(logging.ERROR)
 
 TEST_INPUT = ('18,Self-emp-not-inc,987,Bachelors,12,Married-civ-spouse,abc,'
               'Husband,zyx,wvu,34,56,78,tsr,<=50K')
diff --git a/official/r1/wide_deep/movielens_test.py b/official/r1/wide_deep/movielens_test.py
index 1bcdd3bdebd..5117f6285b4 100644
--- a/official/r1/wide_deep/movielens_test.py
+++ b/official/r1/wide_deep/movielens_test.py
@@ -28,8 +28,9 @@
 from official.utils.testing import integration
 from official.r1.wide_deep import movielens_dataset
 from official.r1.wide_deep import movielens_main
+from absl import logging
 
-tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+logging.set_verbosity(logging.ERROR)
 
 TEST_INPUT_VALUES = {
diff --git a/official/utils/flags/_device.py b/official/utils/flags/_device.py
index edaf2f9a16c..d8974fc48d1 100644
--- a/official/utils/flags/_device.py
+++ b/official/utils/flags/_device.py
@@ -19,7 +19,7 @@
 from __future__ import print_function
 
 from absl import flags
-import tensorflow as tf
+from absl import logging
 
 from official.utils.flags._conventions import help_wrap
@@ -39,7 +39,7 @@ def _path_check(flag_values):  # pylint: disable=missing-docstring
   valid_flags = True
   for key in flag_names:
     if not flag_values[key].startswith("gs://"):
-      tf.compat.v1.logging.error("{} must be a GCS path.".format(key))
+      logging.error("%s must be a GCS path.", key)
       valid_flags = False
 
   return valid_flags
diff --git a/official/utils/logs/hooks_helper.py b/official/utils/logs/hooks_helper.py
index 50a380d9edd..be2fc4d70d8 100644
--- a/official/utils/logs/hooks_helper.py
+++ b/official/utils/logs/hooks_helper.py
@@ -25,6 +25,7 @@
 from __future__ import print_function
 
 import tensorflow as tf  # pylint: disable=g-bad-import-order
+from absl import logging
 
 from official.utils.logs import hooks
 from official.utils.logs import logger
@@ -57,9 +58,9 @@ def get_train_hooks(name_list, use_tpu=False, **kwargs):
     return []
 
   if use_tpu:
-    tf.compat.v1.logging.warning('hooks_helper received name_list `{}`, but a '
-                                 'TPU is specified. No hooks will be used.'
-                                 .format(name_list))
+    logging.warning(
+        'hooks_helper received name_list `%s`, but a '
+        'TPU is specified. No hooks will be used.', name_list)
     return []
 
   train_hooks = []
diff --git a/official/utils/logs/hooks_test.py b/official/utils/logs/hooks_test.py
index 70697797d0a..adbc0a293f5 100644
--- a/official/utils/logs/hooks_test.py
+++ b/official/utils/logs/hooks_test.py
@@ -21,12 +21,13 @@
 import time
 
+from absl import logging
 import tensorflow as tf  # pylint: disable=g-bad-import-order
 
 from official.utils.logs import hooks
 from official.utils.testing import mock_lib
 
-tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
+logging.set_verbosity(logging.DEBUG)
 
 
 class ExamplesPerSecondHookTest(tf.test.TestCase):
diff --git a/official/utils/logs/logger.py b/official/utils/logs/logger.py
index 398aa8a51f4..269a470b441 100644
--- a/official/utils/logs/logger.py
+++ b/official/utils/logs/logger.py
@@ -35,6 +35,7 @@
 from absl import flags
 import tensorflow as tf
 from tensorflow.python.client import device_lib
+from absl import logging
 
 from official.utils.logs import cloud_lib
@@ -119,9 +120,8 @@ def log_evaluation_result(self, eval_results):
       eval_results: dict, the result of evaluate.
     """
     if not isinstance(eval_results, dict):
-      tf.compat.v1.logging.warning(
-          "eval_results should be dictionary for logging. Got %s",
-          type(eval_results))
+      logging.warning("eval_results should be dictionary for logging. Got %s",
+                      type(eval_results))
       return
     global_step = eval_results[tf.compat.v1.GraphKeys.GLOBAL_STEP]
     for key in sorted(eval_results):
@@ -144,12 +144,12 @@ def log_metric(self, name, value, unit=None, global_step=None, extras=None):
     """
     metric = _process_metric_to_json(name, value, unit, global_step, extras)
     if metric:
-      tf.compat.v1.logging.info("Benchmark metric: %s", metric)
+      logging.info("Benchmark metric: %s", metric)
 
   def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
-    tf.compat.v1.logging.info(
-        "Benchmark run: %s", _gather_run_info(model_name, dataset_name,
-                                              run_params, test_id))
+    logging.info(
+        "Benchmark run: %s",
+        _gather_run_info(model_name, dataset_name, run_params, test_id))
 
   def on_finish(self, status):
     pass
@@ -187,7 +187,7 @@ def log_metric(self, name, value, unit=None, global_step=None, extras=None):
         self._metric_file_handler.write("\n")
         self._metric_file_handler.flush()
       except (TypeError, ValueError) as e:
-        tf.compat.v1.logging.warning(
+        logging.warning(
             "Failed to dump metric to log file: name %s, value %s, error %s",
             name, value, e)
@@ -212,8 +212,7 @@ def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
         json.dump(run_info, f)
         f.write("\n")
     except (TypeError, ValueError) as e:
-      tf.compat.v1.logging.warning(
-          "Failed to dump benchmark run info to log file: %s", e)
+      logging.warning("Failed to dump benchmark run info to log file: %s", e)
 
   def on_finish(self, status):
     self._metric_file_handler.flush()
@@ -322,8 +321,8 @@ def _process_metric_to_json(
     name, value, unit=None, global_step=None, extras=None):
   """Validate the metric data and generate JSON for insert."""
   if not isinstance(value, numbers.Number):
-    tf.compat.v1.logging.warning(
-        "Metric value to log should be a number. Got %s", type(value))
+    logging.warning("Metric value to log should be a number. Got %s",
+                    type(value))
     return None
 
   extras = _convert_to_json_dict(extras)
@@ -383,8 +382,7 @@ def _collect_cpu_info(run_info):
 
     run_info["machine_config"]["cpu_info"] = cpu_info
   except ImportError:
-    tf.compat.v1.logging.warn(
-        "'cpuinfo' not imported. CPU info will not be logged.")
+    logging.warn("'cpuinfo' not imported. CPU info will not be logged.")
 
 
 def _collect_memory_info(run_info):
@@ -396,8 +394,7 @@ def _collect_memory_info(run_info):
     run_info["machine_config"]["memory_total"] = vmem.total
     run_info["machine_config"]["memory_available"] = vmem.available
   except ImportError:
-    tf.compat.v1.logging.warn(
-        "'psutil' not imported. Memory info will not be logged.")
+    logging.warn("'psutil' not imported. Memory info will not be logged.")
 
 
 def _collect_test_environment(run_info):
diff --git a/official/utils/logs/logger_test.py b/official/utils/logs/logger_test.py
index 520db5ffd05..770ad24653c 100644
--- a/official/utils/logs/logger_test.py
+++ b/official/utils/logs/logger_test.py
@@ -28,6 +28,7 @@
 import mock
 from absl.testing import flagsaver
 import tensorflow as tf  # pylint: disable=g-bad-import-order
+from absl import logging
 
 try:
   from google.cloud import bigquery
@@ -79,7 +80,7 @@ def test_benchmark_context(self, mock_config_benchmark_logger):
     mock_logger = mock.MagicMock()
     mock_config_benchmark_logger.return_value = mock_logger
     with logger.benchmark_context(None):
-      tf.compat.v1.logging.info("start benchmarking")
+      logging.info("start benchmarking")
     mock_logger.on_finish.assert_called_once_with(logger.RUN_STATUS_SUCCESS)
 
   @mock.patch("official.utils.logs.logger.config_benchmark_logger")
@@ -96,18 +97,18 @@ class BaseBenchmarkLoggerTest(tf.test.TestCase):
 
   def setUp(self):
     super(BaseBenchmarkLoggerTest, self).setUp()
-    self._actual_log = tf.compat.v1.logging.info
+    self._actual_log = logging.info
     self.logged_message = None
 
     def mock_log(*args, **kwargs):
       self.logged_message = args
       self._actual_log(*args, **kwargs)
 
-    tf.compat.v1.logging.info = mock_log
+    logging.info = mock_log
 
   def tearDown(self):
     super(BaseBenchmarkLoggerTest, self).tearDown()
-    tf.compat.v1.logging.info = self._actual_log
+    logging.info = self._actual_log
 
   def test_log_metric(self):
     log = logger.BaseBenchmarkLogger()
diff --git a/official/utils/logs/mlperf_helper.py b/official/utils/logs/mlperf_helper.py
index c9c04344d08..a07360c4aad 100644
--- a/official/utils/logs/mlperf_helper.py
+++ b/official/utils/logs/mlperf_helper.py
@@ -31,8 +31,9 @@
 import subprocess
 import sys
 import typing
+from absl import logging
+# pylint:disable=logging-format-interpolation
 
-import tensorflow as tf
 
 _MIN_VERSION = (0, 0, 10)
 _STACK_OFFSET = 2
@@ -94,10 +95,9 @@ def test_mlperf_log_pip_version():
   version = pkg_resources.get_distribution("mlperf_compliance")
   version = tuple(int(i) for i in version.version.split("."))
   if version < _MIN_VERSION:
-    tf.compat.v1.logging.warning(
-        "mlperf_compliance is version {}, must be >= {}".format(
-            ".".join([str(i) for i in version]),
-            ".".join([str(i) for i in _MIN_VERSION])))
+    logging.warning("mlperf_compliance is version {}, must be >= {}".format(
+        ".".join([str(i) for i in version]),
+        ".".join([str(i) for i in _MIN_VERSION])))
     raise ImportError
   return mlperf_compliance.mlperf_log
@@ -187,6 +187,6 @@ def clear_system_caches():
 
 if __name__ == "__main__":
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  logging.set_verbosity(logging.INFO)
   with LOGGER(True):
     ncf_print(key=TAGS.RUN_START)
diff --git a/official/utils/misc/distribution_utils.py b/official/utils/misc/distribution_utils.py
index fd5c7423e76..98d51cfb1c1 100644
--- a/official/utils/misc/distribution_utils.py
+++ b/official/utils/misc/distribution_utils.py
@@ -22,6 +22,8 @@
 import os
 import random
 import string
+
+from absl import logging
 import tensorflow.compat.v2 as tf
 
 from official.utils.misc import tpu_lib
@@ -252,7 +254,7 @@ def initialize(self):
 
 def _monkey_patch_dataset_method(strategy):
   """Monkey-patch `strategy`'s `make_dataset_iterator` method."""
   def make_dataset(self, dataset):
-    tf.compat.v1.logging.info('Using pure synthetic data.')
+    logging.info('Using pure synthetic data.')
     with self.scope():
       if self.extended._global_batch_size:  # pylint: disable=protected-access
         return SyntheticDataset(dataset, self.num_replicas_in_sync)
diff --git a/official/utils/misc/model_helpers.py b/official/utils/misc/model_helpers.py
index c112bacd420..9a44e50ac46 100644
--- a/official/utils/misc/model_helpers.py
+++ b/official/utils/misc/model_helpers.py
@@ -20,8 +20,11 @@
 
 import numbers
 
+from absl import logging
 import tensorflow as tf
+
 from tensorflow.python.util import nest
+# pylint:disable=logging-format-interpolation
 
 
 def past_stop_threshold(stop_threshold, eval_metric):
@@ -48,9 +51,8 @@ def past_stop_threshold(stop_threshold, eval_metric):
                      "must be a number.")
 
   if eval_metric >= stop_threshold:
-    tf.compat.v1.logging.info(
-        "Stop threshold of {} was passed with metric value {}.".format(
-            stop_threshold, eval_metric))
+    logging.info("Stop threshold of {} was passed with metric value {}.".format(
+        stop_threshold, eval_metric))
     return True
 
   return False
@@ -88,6 +90,6 @@ def generate_synthetic_data(
 
 def apply_clean(flags_obj):
   if flags_obj.clean and tf.io.gfile.exists(flags_obj.model_dir):
-    tf.compat.v1.logging.info("--clean flag set. Removing existing model dir:"
-                              " {}".format(flags_obj.model_dir))
+    logging.info("--clean flag set. Removing existing model dir:"
+                 " {}".format(flags_obj.model_dir))
     tf.io.gfile.rmtree(flags_obj.model_dir)
diff --git a/official/utils/testing/perfzero_benchmark.py b/official/utils/testing/perfzero_benchmark.py
index af4a91290a8..895f8238ba2 100644
--- a/official/utils/testing/perfzero_benchmark.py
+++ b/official/utils/testing/perfzero_benchmark.py
@@ -20,8 +20,9 @@
 import os
 
 from absl import flags
+from absl import logging
 from absl.testing import flagsaver
-import tensorflow as tf  # pylint: disable=g-bad-import-order
+import tensorflow as tf
 
 FLAGS = flags.FLAGS
@@ -75,7 +76,7 @@ def _get_model_dir(self, folder_name):
 
   def _setup(self):
     """Sets up and resets flags before each test."""
-    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+    logging.set_verbosity(logging.INFO)
     if PerfZeroBenchmark.local_flags is None:
       for flag_method in self.flag_methods:
         flag_method()
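Note (aside, not part of the patch): in the files that keep str.format()-style messages (file_io.py, mlperf_helper.py, model_helpers.py above), the change adds a module-level # pylint:disable=logging-format-interpolation instead of rewriting every call site. A short sketch contrasting the two styles that coexist after this patch, assuming absl-py:

from absl import logging
# pylint:disable=logging-format-interpolation

count, total = 8, 128
# Eager .format(): works, but trips pylint's logging-format-interpolation
# check, hence the module-level disable in those files.
logging.info("{}/{} examples written.".format(count, total))
# Preferred lazy interpolation: formatting is deferred to the logging framework.
logging.info("%d/%d examples written.", count, total)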