From 3043566dd0d3a40c6445893a481a29906ede2788 Mon Sep 17 00:00:00 2001 From: ayushmankumar7 Date: Wed, 18 Mar 2020 00:05:38 +0530 Subject: [PATCH 1/2] tf.compat.v1.logging implemented with absl --- .../benchmark/models/resnet_cifar_main.py | 5 +++-- official/benchmark/ncf_keras_benchmark.py | 3 ++- .../modeling/model_training_utils_test.py | 3 ++- official/nlp/transformer/translate.py | 12 +++++------ official/nlp/transformer/utils/tokenizer.py | 21 ++++++++++--------- official/r1/mnist/mnist.py | 3 ++- official/r1/mnist/mnist_test.py | 4 ++-- official/r1/resnet/cifar10_main.py | 7 ++++--- official/r1/resnet/cifar10_test.py | 3 ++- official/r1/resnet/estimator_benchmark.py | 3 ++- official/r1/resnet/imagenet_main.py | 5 +++-- official/r1/resnet/imagenet_test.py | 2 +- official/r1/resnet/resnet_run_loop.py | 15 ++++++------- official/r1/utils/data/file_io.py | 13 ++++++------ official/r1/wide_deep/census_test.py | 3 ++- official/r1/wide_deep/movielens_test.py | 3 ++- official/utils/flags/_device.py | 3 ++- official/utils/logs/hooks_helper.py | 3 ++- official/utils/logs/hooks_test.py | 2 +- official/utils/logs/logger.py | 17 ++++++++------- official/utils/logs/logger_test.py | 9 ++++---- official/utils/logs/mlperf_helper.py | 5 +++-- official/utils/misc/distribution_utils.py | 3 ++- official/utils/misc/model_helpers.py | 5 +++-- official/utils/testing/perfzero_benchmark.py | 4 ++-- .../image_classification/common_test.py | 3 ++- 26 files changed, 90 insertions(+), 69 deletions(-) diff --git a/official/benchmark/models/resnet_cifar_main.py b/official/benchmark/models/resnet_cifar_main.py index 114adb4b046..6b8656c8e62 100644 --- a/official/benchmark/models/resnet_cifar_main.py +++ b/official/benchmark/models/resnet_cifar_main.py @@ -20,6 +20,7 @@ from absl import app from absl import flags +from absl import logging import numpy as np import tensorflow as tf from official.benchmark.models import resnet_cifar_model @@ -100,7 +101,7 @@ def on_batch_begin(self, batch, logs=None): if lr != self.prev_lr: self.model.optimizer.learning_rate = lr # lr should be a float here self.prev_lr = lr - tf.compat.v1.logging.debug( + logging.debug( 'Epoch %05d Batch %05d: LearningRateBatchScheduler ' 'change learning rate to %s.', self.epochs, batch, lr) @@ -280,6 +281,6 @@ def main(_): if __name__ == '__main__': - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + logging.set_verbosity(logging.INFO) define_cifar_flags() app.run(main) diff --git a/official/benchmark/ncf_keras_benchmark.py b/official/benchmark/ncf_keras_benchmark.py index 1d66b52199a..61588f43e01 100644 --- a/official/benchmark/ncf_keras_benchmark.py +++ b/official/benchmark/ncf_keras_benchmark.py @@ -23,6 +23,7 @@ from absl import flags from absl.testing import flagsaver +from absl import logging import tensorflow as tf from official.recommendation import ncf_common @@ -51,7 +52,7 @@ def __init__(self, def _setup(self): """Sets up and resets flags before each test.""" assert tf.version.VERSION.startswith('2.') - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + logging.set_verbosity(logging.INFO) if NCFKerasBenchmarkBase.local_flags is None: ncf_common.define_ncf_flags() # Loads flags to get defaults to then override. List cannot be empty. 
diff --git a/official/modeling/model_training_utils_test.py b/official/modeling/model_training_utils_test.py index 11079a5bc56..d03c926469a 100644 --- a/official/modeling/model_training_utils_test.py +++ b/official/modeling/model_training_utils_test.py @@ -22,6 +22,7 @@ from absl.testing import parameterized from absl.testing.absltest import mock +from absl import logging import numpy as np import tensorflow as tf @@ -125,7 +126,7 @@ def summaries_with_matching_keyword(keyword, summary_dir): if event.summary is not None: for value in event.summary.value: if keyword in value.tag: - tf.compat.v1.logging.error(event) + logging.error(event) yield event.summary diff --git a/official/nlp/transformer/translate.py b/official/nlp/transformer/translate.py index dd7b30a8a53..0cda3139bd4 100644 --- a/official/nlp/transformer/translate.py +++ b/official/nlp/transformer/translate.py @@ -20,7 +20,7 @@ import numpy as np import tensorflow as tf - +from absl import logging from official.nlp.transformer.utils import tokenizer _EXTRA_DECODE_LENGTH = 100 @@ -117,7 +117,7 @@ def input_generator(): maxlen=params["decode_max_length"], dtype="int32", padding="post") - tf.compat.v1.logging.info("Decoding batch %d out of %d.", i, + logging.info("Decoding batch %d out of %d.", i, num_decode_batches) yield batch @@ -172,7 +172,7 @@ def text_as_per_replica(): translation = _trim_and_decode(val_outputs[j], subtokenizer) translations.append(translation) if print_all_translations: - tf.compat.v1.logging.info( + logging.info( "Translating:\n\tInput: %s\n\tOutput: %s" % (sorted_inputs[j + i * batch_size], translation)) @@ -181,7 +181,7 @@ def text_as_per_replica(): if tf.io.gfile.isdir(output_file): raise ValueError("File output is a directory, will not save outputs to " "file.") - tf.compat.v1.logging.info("Writing to file %s" % output_file) + logging.info("Writing to file %s" % output_file) with tf.compat.v1.gfile.Open(output_file, "w") as f: for i in sorted_keys: f.write("%s\n" % translations[i]) @@ -191,10 +191,10 @@ def translate_from_text(model, subtokenizer, txt): encoded_txt = _encode_and_add_eos(txt, subtokenizer) result = model.predict(encoded_txt) outputs = result["outputs"] - tf.compat.v1.logging.info("Original: \"%s\"" % txt) + logging.info("Original: \"%s\"" % txt) translate_from_input(outputs, subtokenizer) def translate_from_input(outputs, subtokenizer): translation = _trim_and_decode(outputs, subtokenizer) - tf.compat.v1.logging.info("Translation: \"%s\"" % translation) + logging.info("Translation: \"%s\"" % translation) diff --git a/official/nlp/transformer/utils/tokenizer.py b/official/nlp/transformer/utils/tokenizer.py index 20302266acc..52c7ddc27b8 100644 --- a/official/nlp/transformer/utils/tokenizer.py +++ b/official/nlp/transformer/utils/tokenizer.py @@ -22,6 +22,7 @@ import re import sys import unicodedata +from absl import logging import numpy as np import six @@ -63,7 +64,7 @@ class Subtokenizer(object): def __init__(self, vocab_file, reserved_tokens=None): """Initializes class, creating a vocab file if data_files is provided.""" - tf.compat.v1.logging.info("Initializing Subtokenizer from file %s." % + logging.info("Initializing Subtokenizer from file %s." 
% vocab_file) if reserved_tokens is None: @@ -109,15 +110,15 @@ def init_from_files( reserved_tokens = RESERVED_TOKENS if tf.io.gfile.exists(vocab_file): - tf.compat.v1.logging.info("Vocab file already exists (%s)" % vocab_file) + logging.info("Vocab file already exists (%s)" % vocab_file) else: - tf.compat.v1.logging.info("Begin steps to create subtoken vocabulary...") + logging.info("Begin steps to create subtoken vocabulary...") token_counts = _count_tokens(files, file_byte_limit, correct_strip) alphabet = _generate_alphabet_dict(token_counts) subtoken_list = _generate_subtokens_with_target_vocab_size( token_counts, alphabet, target_vocab_size, threshold, min_count, reserved_tokens) - tf.compat.v1.logging.info("Generated vocabulary with %d subtokens." % + logging.info("Generated vocabulary with %d subtokens." % len(subtoken_list)) _save_vocab_file(vocab_file, subtoken_list) return Subtokenizer(vocab_file) @@ -402,7 +403,7 @@ def _generate_subtokens_with_target_vocab_size( reserved_tokens = RESERVED_TOKENS if min_count is not None: - tf.compat.v1.logging.info( + logging.info( "Using min_count=%d to generate vocab with target size %d" % (min_count, target_size)) return _generate_subtokens( @@ -411,13 +412,13 @@ def _generate_subtokens_with_target_vocab_size( def bisect(min_val, max_val): """Recursive function to binary search for subtoken vocabulary.""" cur_count = (min_val + max_val) // 2 - tf.compat.v1.logging.info("Binary search: trying min_count=%d (%d %d)" % + logging.info("Binary search: trying min_count=%d (%d %d)" % (cur_count, min_val, max_val)) subtoken_list = _generate_subtokens( token_counts, alphabet, cur_count, reserved_tokens=reserved_tokens) val = len(subtoken_list) - tf.compat.v1.logging.info( + logging.info( "Binary search: min_count=%d resulted in %d tokens" % (cur_count, val)) within_threshold = abs(val - target_size) < threshold @@ -434,7 +435,7 @@ def bisect(min_val, max_val): return other_subtoken_list return subtoken_list - tf.compat.v1.logging.info("Finding best min_count to get target size of %d" % + logging.info("Finding best min_count to get target size of %d" % target_size) return bisect(_MIN_MIN_COUNT, _MAX_MIN_COUNT) @@ -603,7 +604,7 @@ def _generate_subtokens( # subtoken_dict, count how often the resulting subtokens appear, and update # the dictionary with subtokens w/ high enough counts. for i in xrange(num_iterations): - tf.compat.v1.logging.info("\tGenerating subtokens: iteration %d" % i) + logging.info("\tGenerating subtokens: iteration %d" % i) # Generate new subtoken->id dictionary using the new subtoken list. 
subtoken_dict = _list_to_index_dict(subtoken_list) @@ -616,5 +617,5 @@ def _generate_subtokens( subtoken_list, max_subtoken_length = _gen_new_subtoken_list( subtoken_counts, min_count, alphabet, reserved_tokens) - tf.compat.v1.logging.info("\tVocab size: %d" % len(subtoken_list)) + logging.info("\tVocab size: %d" % len(subtoken_list)) return subtoken_list diff --git a/official/r1/mnist/mnist.py b/official/r1/mnist/mnist.py index 2b4d3a308d3..9cc5889c96d 100644 --- a/official/r1/mnist/mnist.py +++ b/official/r1/mnist/mnist.py @@ -17,6 +17,7 @@ from __future__ import division from __future__ import print_function +from absl import logging from absl import app as absl_app from absl import flags from six.moves import range @@ -243,6 +244,6 @@ def main(_): if __name__ == '__main__': - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + logging.set_verbosity(logging.INFO) define_mnist_flags() absl_app.run(main) diff --git a/official/r1/mnist/mnist_test.py b/official/r1/mnist/mnist_test.py index 9c5b6dc393a..207c0d0bbff 100644 --- a/official/r1/mnist/mnist_test.py +++ b/official/r1/mnist/mnist_test.py @@ -21,7 +21,7 @@ import unittest import tensorflow as tf # pylint: disable=g-bad-import-order - +from absl import logging from official.r1.mnist import mnist from official.utils.misc import keras_utils @@ -143,5 +143,5 @@ def benchmark_train_step_time(self): if __name__ == '__main__': - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) + logging.set_verbosity(logging.ERROR) tf.test.main() diff --git a/official/r1/resnet/cifar10_main.py b/official/r1/resnet/cifar10_main.py index fa6a2f0d6f1..6362a0e3833 100644 --- a/official/r1/resnet/cifar10_main.py +++ b/official/r1/resnet/cifar10_main.py @@ -20,6 +20,7 @@ import os +from absl import logging from absl import app as absl_app from absl import flags from six.moves import range @@ -139,7 +140,7 @@ def input_fn(is_training, dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES) if input_context: - tf.compat.v1.logging.info( + logging.info( 'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d' % ( input_context.input_pipeline_id, input_context.num_input_pipelines)) dataset = dataset.shard(input_context.num_input_pipelines, @@ -270,7 +271,7 @@ def run_cifar(flags_obj): Dictionary of results. Including final accuracy. """ if flags_obj.image_bytes_as_serving_input: - tf.compat.v1.logging.fatal( + logging.fatal( '--image_bytes_as_serving_input cannot be set to True for CIFAR. 
' 'This flag is only applicable to ImageNet.') return @@ -291,6 +292,6 @@ def main(_): if __name__ == '__main__': - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + logging.set_verbosity(logging.INFO) define_cifar_flags() absl_app.run(main) diff --git a/official/r1/resnet/cifar10_test.py b/official/r1/resnet/cifar10_test.py index 7bd88715727..ac30dbcd676 100644 --- a/official/r1/resnet/cifar10_test.py +++ b/official/r1/resnet/cifar10_test.py @@ -21,12 +21,13 @@ import numpy as np import tensorflow as tf # pylint: disable=g-bad-import-order +from absl import logging from official.r1.resnet import cifar10_main from official.utils.misc import keras_utils from official.utils.testing import integration -tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) +logging.set_verbosity(logging.ERROR) _BATCH_SIZE = 128 _HEIGHT = 32 diff --git a/official/r1/resnet/estimator_benchmark.py b/official/r1/resnet/estimator_benchmark.py index 18056fa5830..eef3ea467e3 100644 --- a/official/r1/resnet/estimator_benchmark.py +++ b/official/r1/resnet/estimator_benchmark.py @@ -21,6 +21,7 @@ import time from absl import flags +from absl import logging from absl.testing import flagsaver import tensorflow as tf # pylint: disable=g-bad-import-order @@ -56,7 +57,7 @@ def _get_model_dir(self, folder_name): def _setup(self): """Sets up and resets flags before each test.""" - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + logging.set_verbosity(logging.INFO) if EstimatorBenchmark.local_flags is None: for flag_method in self.flag_methods: flag_method() diff --git a/official/r1/resnet/imagenet_main.py b/official/r1/resnet/imagenet_main.py index 0bbb9fe9fb9..1959080d92c 100644 --- a/official/r1/resnet/imagenet_main.py +++ b/official/r1/resnet/imagenet_main.py @@ -22,6 +22,7 @@ from absl import app as absl_app from absl import flags +from absl import logging from six.moves import range import tensorflow as tf @@ -194,7 +195,7 @@ def input_fn(is_training, dataset = tf.data.Dataset.from_tensor_slices(filenames) if input_context: - tf.compat.v1.logging.info( + logging.info( 'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d' % ( input_context.input_pipeline_id, input_context.num_input_pipelines)) dataset = dataset.shard(input_context.num_input_pipelines, @@ -387,6 +388,6 @@ def main(_): if __name__ == '__main__': - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + logging.set_verbosity(logging.INFO) define_imagenet_flags() absl_app.run(main) diff --git a/official/r1/resnet/imagenet_test.py b/official/r1/resnet/imagenet_test.py index 5a7ddcd686f..226bcd93bae 100644 --- a/official/r1/resnet/imagenet_test.py +++ b/official/r1/resnet/imagenet_test.py @@ -25,7 +25,7 @@ from official.utils.misc import keras_utils from official.utils.testing import integration -tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) +logging.set_verbosity(logging.ERROR) _BATCH_SIZE = 32 _LABEL_CLASSES = 1001 diff --git a/official/r1/resnet/resnet_run_loop.py b/official/r1/resnet/resnet_run_loop.py index d1c6282f871..0e15cfecdaf 100644 --- a/official/r1/resnet/resnet_run_loop.py +++ b/official/r1/resnet/resnet_run_loop.py @@ -28,6 +28,7 @@ import multiprocessing import os +from absl import logging from absl import flags import tensorflow as tf @@ -83,7 +84,7 @@ def process_record_dataset(dataset, options.experimental_threading.private_threadpool_size = ( datasets_num_private_threads) dataset = dataset.with_options(options) - 
tf.compat.v1.logging.info('datasets_num_private_threads: %s', + logging.info('datasets_num_private_threads: %s', datasets_num_private_threads) # Disable intra-op parallelism to optimize for throughput instead of latency. @@ -205,16 +206,16 @@ def override_flags_and_set_envars_for_gpu_thread_pool(flags_obj): what has been set by the user on the command-line. """ cpu_count = multiprocessing.cpu_count() - tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count) + logging.info('Logical CPU cores: %s', cpu_count) # Sets up thread pool for each GPU for op scheduling. per_gpu_thread_count = 1 total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count) - tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s', + logging.info('TF_GPU_THREAD_COUNT: %s', os.environ['TF_GPU_THREAD_COUNT']) - tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s', + logging.info('TF_GPU_THREAD_MODE: %s', os.environ['TF_GPU_THREAD_MODE']) # Reduces general thread pool by number of threads used for GPU pool. @@ -648,7 +649,7 @@ def input_fn_eval(): hooks=train_hooks, max_steps=flags_obj.max_train_steps) eval_spec = tf.estimator.EvalSpec(input_fn=input_fn_eval) - tf.compat.v1.logging.info('Starting to train and evaluate.') + logging.info('Starting to train and evaluate.') tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec) # tf.estimator.train_and_evalute doesn't return anything in multi-worker # case. @@ -671,7 +672,7 @@ def input_fn_eval(): schedule[-1] = train_epochs - sum(schedule[:-1]) # over counting. for cycle_index, num_train_epochs in enumerate(schedule): - tf.compat.v1.logging.info('Starting cycle: %d/%d', cycle_index, + logging.info('Starting cycle: %d/%d', cycle_index, int(n_loops)) if num_train_epochs: @@ -691,7 +692,7 @@ def input_fn_eval(): # allows the eval (which is generally unimportant in those circumstances) # to terminate. Note that eval will run for max_train_steps each loop, # regardless of the global_step count. - tf.compat.v1.logging.info('Starting to evaluate.') + logging.info('Starting to evaluate.') eval_results = classifier.evaluate(input_fn=input_fn_eval, steps=flags_obj.max_train_steps) diff --git a/official/r1/utils/data/file_io.py b/official/r1/utils/data/file_io.py index 0f7f3b94749..6dbff7759b9 100644 --- a/official/r1/utils/data/file_io.py +++ b/official/r1/utils/data/file_io.py @@ -27,6 +27,7 @@ import numpy as np import six +from absl import logging import tensorflow as tf @@ -50,9 +51,9 @@ def purge(self): for i in self.temp_buffers: if tf.io.gfile.exists(i): tf.io.gfile.remove(i) - tf.compat.v1.logging.info("Buffer file {} removed".format(i)) + logging.info("Buffer file {} removed".format(i)) except Exception as e: - tf.compat.v1.logging.error("Failed to cleanup buffer files: {}".format(e)) + logging.error("Failed to cleanup buffer files: {}".format(e)) _GARBAGE_COLLECTOR = _GarbageCollector() @@ -176,7 +177,7 @@ def write_to_buffer(dataframe, buffer_path, columns, expected_size=None): actual_size = tf.io.gfile.stat(buffer_path).length if expected_size == actual_size: return buffer_path - tf.compat.v1.logging.warning( + logging.warning( "Existing buffer {} has size {}. Expected size {}. 
Deleting and " "rebuilding buffer.".format(buffer_path, actual_size, expected_size)) tf.io.gfile.remove(buffer_path) @@ -187,7 +188,7 @@ def write_to_buffer(dataframe, buffer_path, columns, expected_size=None): tf.io.gfile.makedirs(os.path.split(buffer_path)[0]) - tf.compat.v1.logging.info("Constructing TFRecordDataset buffer: {}" + logging.info("Constructing TFRecordDataset buffer: {}" .format(buffer_path)) count = 0 @@ -198,10 +199,10 @@ def write_to_buffer(dataframe, buffer_path, columns, expected_size=None): rows_per_core=_ROWS_PER_CORE): _serialize_shards(df_shards, columns, pool, writer) count += sum([len(s) for s in df_shards]) - tf.compat.v1.logging.info("{}/{} examples written." + logging.info("{}/{} examples written." .format(str(count).ljust(8), len(dataframe))) finally: pool.terminate() - tf.compat.v1.logging.info("Buffer write complete.") + logging.info("Buffer write complete.") return buffer_path diff --git a/official/r1/wide_deep/census_test.py b/official/r1/wide_deep/census_test.py index f0c601340cd..8e1c8657902 100644 --- a/official/r1/wide_deep/census_test.py +++ b/official/r1/wide_deep/census_test.py @@ -21,13 +21,14 @@ import unittest import tensorflow as tf # pylint: disable=g-bad-import-order +from absl import logging from official.utils.misc import keras_utils from official.utils.testing import integration from official.r1.wide_deep import census_dataset from official.r1.wide_deep import census_main -tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) +logging.set_verbosity(logging.ERROR) TEST_INPUT = ('18,Self-emp-not-inc,987,Bachelors,12,Married-civ-spouse,abc,' 'Husband,zyx,wvu,34,56,78,tsr,<=50K') diff --git a/official/r1/wide_deep/movielens_test.py b/official/r1/wide_deep/movielens_test.py index 1bcdd3bdebd..5117f6285b4 100644 --- a/official/r1/wide_deep/movielens_test.py +++ b/official/r1/wide_deep/movielens_test.py @@ -28,8 +28,9 @@ from official.utils.testing import integration from official.r1.wide_deep import movielens_dataset from official.r1.wide_deep import movielens_main +from absl import logging -tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) +logging.set_verbosity(logging.ERROR) TEST_INPUT_VALUES = { diff --git a/official/utils/flags/_device.py b/official/utils/flags/_device.py index edaf2f9a16c..535b63b3b95 100644 --- a/official/utils/flags/_device.py +++ b/official/utils/flags/_device.py @@ -20,6 +20,7 @@ from absl import flags import tensorflow as tf +from absl import logging from official.utils.flags._conventions import help_wrap @@ -39,7 +40,7 @@ def _path_check(flag_values): # pylint: disable=missing-docstring valid_flags = True for key in flag_names: if not flag_values[key].startswith("gs://"): - tf.compat.v1.logging.error("{} must be a GCS path.".format(key)) + logging.error("{} must be a GCS path.".format(key)) valid_flags = False return valid_flags diff --git a/official/utils/logs/hooks_helper.py b/official/utils/logs/hooks_helper.py index 50a380d9edd..25175fffbe5 100644 --- a/official/utils/logs/hooks_helper.py +++ b/official/utils/logs/hooks_helper.py @@ -25,6 +25,7 @@ from __future__ import print_function import tensorflow as tf # pylint: disable=g-bad-import-order +from absl import logging from official.utils.logs import hooks from official.utils.logs import logger @@ -57,7 +58,7 @@ def get_train_hooks(name_list, use_tpu=False, **kwargs): return [] if use_tpu: - tf.compat.v1.logging.warning('hooks_helper received name_list `{}`, but a ' + logging.warning('hooks_helper received name_list `{}`, but a ' 'TPU is 
specified. No hooks will be used.' .format(name_list)) return [] diff --git a/official/utils/logs/hooks_test.py b/official/utils/logs/hooks_test.py index 70697797d0a..2c01382fac5 100644 --- a/official/utils/logs/hooks_test.py +++ b/official/utils/logs/hooks_test.py @@ -26,7 +26,7 @@ from official.utils.logs import hooks from official.utils.testing import mock_lib -tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG) +logging.set_verbosity(logging.DEBUG) class ExamplesPerSecondHookTest(tf.test.TestCase): diff --git a/official/utils/logs/logger.py b/official/utils/logs/logger.py index 398aa8a51f4..027422c6cb4 100644 --- a/official/utils/logs/logger.py +++ b/official/utils/logs/logger.py @@ -35,6 +35,7 @@ from absl import flags import tensorflow as tf from tensorflow.python.client import device_lib +from absl import logging from official.utils.logs import cloud_lib @@ -119,7 +120,7 @@ def log_evaluation_result(self, eval_results): eval_results: dict, the result of evaluate. """ if not isinstance(eval_results, dict): - tf.compat.v1.logging.warning( + logging.warning( "eval_results should be dictionary for logging. Got %s", type(eval_results)) return @@ -144,10 +145,10 @@ def log_metric(self, name, value, unit=None, global_step=None, extras=None): """ metric = _process_metric_to_json(name, value, unit, global_step, extras) if metric: - tf.compat.v1.logging.info("Benchmark metric: %s", metric) + logging.info("Benchmark metric: %s", metric) def log_run_info(self, model_name, dataset_name, run_params, test_id=None): - tf.compat.v1.logging.info( + logging.info( "Benchmark run: %s", _gather_run_info(model_name, dataset_name, run_params, test_id)) @@ -187,7 +188,7 @@ def log_metric(self, name, value, unit=None, global_step=None, extras=None): self._metric_file_handler.write("\n") self._metric_file_handler.flush() except (TypeError, ValueError) as e: - tf.compat.v1.logging.warning( + logging.warning( "Failed to dump metric to log file: name %s, value %s, error %s", name, value, e) @@ -212,7 +213,7 @@ def log_run_info(self, model_name, dataset_name, run_params, test_id=None): json.dump(run_info, f) f.write("\n") except (TypeError, ValueError) as e: - tf.compat.v1.logging.warning( + logging.warning( "Failed to dump benchmark run info to log file: %s", e) def on_finish(self, status): @@ -322,7 +323,7 @@ def _process_metric_to_json( name, value, unit=None, global_step=None, extras=None): """Validate the metric data and generate JSON for insert.""" if not isinstance(value, numbers.Number): - tf.compat.v1.logging.warning( + logging.warning( "Metric value to log should be a number. Got %s", type(value)) return None @@ -383,7 +384,7 @@ def _collect_cpu_info(run_info): run_info["machine_config"]["cpu_info"] = cpu_info except ImportError: - tf.compat.v1.logging.warn( + logging.warn( "'cpuinfo' not imported. CPU info will not be logged.") @@ -396,7 +397,7 @@ def _collect_memory_info(run_info): run_info["machine_config"]["memory_total"] = vmem.total run_info["machine_config"]["memory_available"] = vmem.available except ImportError: - tf.compat.v1.logging.warn( + logging.warn( "'psutil' not imported. 
Memory info will not be logged.") diff --git a/official/utils/logs/logger_test.py b/official/utils/logs/logger_test.py index 520db5ffd05..770ad24653c 100644 --- a/official/utils/logs/logger_test.py +++ b/official/utils/logs/logger_test.py @@ -28,6 +28,7 @@ import mock from absl.testing import flagsaver import tensorflow as tf # pylint: disable=g-bad-import-order +from absl import logging try: from google.cloud import bigquery @@ -79,7 +80,7 @@ def test_benchmark_context(self, mock_config_benchmark_logger): mock_logger = mock.MagicMock() mock_config_benchmark_logger.return_value = mock_logger with logger.benchmark_context(None): - tf.compat.v1.logging.info("start benchmarking") + logging.info("start benchmarking") mock_logger.on_finish.assert_called_once_with(logger.RUN_STATUS_SUCCESS) @mock.patch("official.utils.logs.logger.config_benchmark_logger") @@ -96,18 +97,18 @@ class BaseBenchmarkLoggerTest(tf.test.TestCase): def setUp(self): super(BaseBenchmarkLoggerTest, self).setUp() - self._actual_log = tf.compat.v1.logging.info + self._actual_log = logging.info self.logged_message = None def mock_log(*args, **kwargs): self.logged_message = args self._actual_log(*args, **kwargs) - tf.compat.v1.logging.info = mock_log + logging.info = mock_log def tearDown(self): super(BaseBenchmarkLoggerTest, self).tearDown() - tf.compat.v1.logging.info = self._actual_log + logging.info = self._actual_log def test_log_metric(self): log = logger.BaseBenchmarkLogger() diff --git a/official/utils/logs/mlperf_helper.py b/official/utils/logs/mlperf_helper.py index c9c04344d08..fd1437a99ad 100644 --- a/official/utils/logs/mlperf_helper.py +++ b/official/utils/logs/mlperf_helper.py @@ -31,6 +31,7 @@ import subprocess import sys import typing +from absl import logging import tensorflow as tf @@ -94,7 +95,7 @@ def test_mlperf_log_pip_version(): version = pkg_resources.get_distribution("mlperf_compliance") version = tuple(int(i) for i in version.version.split(".")) if version < _MIN_VERSION: - tf.compat.v1.logging.warning( + logging.warning( "mlperf_compliance is version {}, must be >= {}".format( ".".join([str(i) for i in version]), ".".join([str(i) for i in _MIN_VERSION]))) @@ -187,6 +188,6 @@ def clear_system_caches(): if __name__ == "__main__": - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + logging.set_verbosity(logging.INFO) with LOGGER(True): ncf_print(key=TAGS.RUN_START) diff --git a/official/utils/misc/distribution_utils.py b/official/utils/misc/distribution_utils.py index fd5c7423e76..18949489873 100644 --- a/official/utils/misc/distribution_utils.py +++ b/official/utils/misc/distribution_utils.py @@ -23,6 +23,7 @@ import random import string import tensorflow.compat.v2 as tf +from absl import logging from official.utils.misc import tpu_lib @@ -252,7 +253,7 @@ def initialize(self): def _monkey_patch_dataset_method(strategy): """Monkey-patch `strategy`'s `make_dataset_iterator` method.""" def make_dataset(self, dataset): - tf.compat.v1.logging.info('Using pure synthetic data.') + logging.info('Using pure synthetic data.') with self.scope(): if self.extended._global_batch_size: # pylint: disable=protected-access return SyntheticDataset(dataset, self.num_replicas_in_sync) diff --git a/official/utils/misc/model_helpers.py b/official/utils/misc/model_helpers.py index c112bacd420..f484494d355 100644 --- a/official/utils/misc/model_helpers.py +++ b/official/utils/misc/model_helpers.py @@ -22,6 +22,7 @@ import tensorflow as tf from tensorflow.python.util import nest +from absl import logging def 
past_stop_threshold(stop_threshold, eval_metric): @@ -48,7 +49,7 @@ def past_stop_threshold(stop_threshold, eval_metric): "must be a number.") if eval_metric >= stop_threshold: - tf.compat.v1.logging.info( + logging.info( "Stop threshold of {} was passed with metric value {}.".format( stop_threshold, eval_metric)) return True @@ -88,6 +89,6 @@ def generate_synthetic_data( def apply_clean(flags_obj): if flags_obj.clean and tf.io.gfile.exists(flags_obj.model_dir): - tf.compat.v1.logging.info("--clean flag set. Removing existing model dir:" + logging.info("--clean flag set. Removing existing model dir:" " {}".format(flags_obj.model_dir)) tf.io.gfile.rmtree(flags_obj.model_dir) diff --git a/official/utils/testing/perfzero_benchmark.py b/official/utils/testing/perfzero_benchmark.py index 3308a8290ca..546e6efb507 100644 --- a/official/utils/testing/perfzero_benchmark.py +++ b/official/utils/testing/perfzero_benchmark.py @@ -18,7 +18,7 @@ from __future__ import print_function import os - +from absl import logging from absl import flags from absl.testing import flagsaver import tensorflow as tf # pylint: disable=g-bad-import-order @@ -64,7 +64,7 @@ def _get_model_dir(self, folder_name): def _setup(self): """Sets up and resets flags before each test.""" - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + logging.set_verbosity(logging.INFO) if PerfZeroBenchmark.local_flags is None: for flag_method in self.flag_methods: flag_method() diff --git a/official/vision/image_classification/common_test.py b/official/vision/image_classification/common_test.py index a6a967e8aa6..4c76813d35a 100644 --- a/official/vision/image_classification/common_test.py +++ b/official/vision/image_classification/common_test.py @@ -21,6 +21,7 @@ import numpy as np import tensorflow as tf +from absl import logging from tensorflow.python.platform import googletest from official.utils.misc import keras_utils from official.vision.image_classification import common @@ -106,5 +107,5 @@ def _build_eval_output(self, top_1, eval_loss): return eval_output if __name__ == '__main__': - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) + logging.set_verbosity(logging.ERROR) googletest.main() From 15e0057f88eec0f16eea237bff13bb07660025dc Mon Sep 17 00:00:00 2001 From: ayushmankumar7 Date: Thu, 19 Mar 2020 13:54:59 +0530 Subject: [PATCH 2/2] conflict resolved --- official/vision/image_classification/common_test.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/official/vision/image_classification/common_test.py b/official/vision/image_classification/common_test.py index 4c76813d35a..a6a967e8aa6 100644 --- a/official/vision/image_classification/common_test.py +++ b/official/vision/image_classification/common_test.py @@ -21,7 +21,6 @@ import numpy as np import tensorflow as tf -from absl import logging from tensorflow.python.platform import googletest from official.utils.misc import keras_utils from official.vision.image_classification import common @@ -107,5 +106,5 @@ def _build_eval_output(self, top_1, eval_loss): return eval_output if __name__ == '__main__': - logging.set_verbosity(logging.ERROR) + tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) googletest.main()
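
For reference, a minimal sketch of the before/after pattern this patch applies
across the files above. It assumes only that absl-py is importable (it ships as
a TensorFlow dependency); the verbosity level and log messages below are
illustrative, not taken from the patch.

    # Before: logging through the TF 1.x compatibility shim.
    import tensorflow as tf
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
    tf.compat.v1.logging.info("Processed %d of %d batches.", 10, 100)

    # After: the same calls made directly against absl.logging, which exposes
    # the same set_verbosity/info/warning/error functions and level constants,
    # including lazy %-style argument formatting.
    from absl import logging
    logging.set_verbosity(logging.INFO)
    logging.info("Processed %d of %d batches.", 10, 100)
    logging.warning("eval_results should be a dict, got %s", type([]))

Note that each file touched by the patch needs its own "from absl import logging"
import for the rewritten calls to resolve; where a hunk above changes a call
without showing that import being added, the import is assumed to exist elsewhere
in the file.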